From 50cc36fbfced0b338d6352411e2363bb7ff6d2d6 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 21 Nov 2017 12:38:55 +0100 Subject: [PATCH] Improved documentation of VmaAllocatorCreateInfo::pHeapSizeLimit. --- .../struct_vma_allocator_create_info.html | 3 +- docs/html/vk__mem__alloc_8h_source.html | 148 +++++++++--------- src/vk_mem_alloc.h | 6 + 3 files changed, 82 insertions(+), 75 deletions(-) diff --git a/docs/html/struct_vma_allocator_create_info.html b/docs/html/struct_vma_allocator_create_info.html index 06a1521..7b8f5f5 100644 --- a/docs/html/struct_vma_allocator_create_info.html +++ b/docs/html/struct_vma_allocator_create_info.html @@ -211,8 +211,9 @@ Public Attributes

If there is a limit defined for a heap:

+

Warning! Using this feature may not be equivalent to installing a GPU with a smaller amount of memory, because the graphics driver doesn't necessarily fail new allocations with the VK_ERROR_OUT_OF_DEVICE_MEMORY result when memory capacity is exceeded. It may return success and just silently migrate some device memory blocks to system RAM.
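For example, to cap heap 0 at 256 MiB while leaving all other heaps unlimited (a minimal sketch, not taken from the library docs: which heap to cap must be determined from VkPhysicalDeviceMemoryProperties, and physicalDevice/device are assumed to already exist):

    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit on that heap.
    heapSizeLimit[0] = 256ull * 1024 * 1024; // Assumption: heap 0 is the one to cap.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);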

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 327df23..82a48d2 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,154 +62,154 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
594 #include <vulkan/vulkan.h>
595 
596 VK_DEFINE_HANDLE(VmaAllocator)
597 
598 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
600  VmaAllocator allocator,
601  uint32_t memoryType,
602  VkDeviceMemory memory,
603  VkDeviceSize size);
605 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
606  VmaAllocator allocator,
607  uint32_t memoryType,
608  VkDeviceMemory memory,
609  VkDeviceSize size);
610 
618 typedef struct VmaDeviceMemoryCallbacks {
620  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
622  PFN_vmaFreeDeviceMemoryFunction pfnFree;
623 } VmaDeviceMemoryCallbacks;
624 
660 
663 typedef VkFlags VmaAllocatorCreateFlags;
664 
669 typedef struct VmaVulkanFunctions {
670  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
671  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
672  PFN_vkAllocateMemory vkAllocateMemory;
673  PFN_vkFreeMemory vkFreeMemory;
674  PFN_vkMapMemory vkMapMemory;
675  PFN_vkUnmapMemory vkUnmapMemory;
676  PFN_vkBindBufferMemory vkBindBufferMemory;
677  PFN_vkBindImageMemory vkBindImageMemory;
678  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
679  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
680  PFN_vkCreateBuffer vkCreateBuffer;
681  PFN_vkDestroyBuffer vkDestroyBuffer;
682  PFN_vkCreateImage vkCreateImage;
683  PFN_vkDestroyImage vkDestroyImage;
684  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
685  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
686 } VmaVulkanFunctions;
687 
689 typedef struct VmaAllocatorCreateInfo
690 {
692  VmaAllocatorCreateFlags flags;
694 
695  VkPhysicalDevice physicalDevice;
697 
698  VkDevice device;
700 
703 
706 
707  const VkAllocationCallbacks* pAllocationCallbacks;
709 
724  uint32_t frameInUseCount;
742  const VkDeviceSize* pHeapSizeLimit;
754  const VmaVulkanFunctions* pVulkanFunctions;
755 } VmaAllocatorCreateInfo;
756 
758 VkResult vmaCreateAllocator(
759  const VmaAllocatorCreateInfo* pCreateInfo,
760  VmaAllocator* pAllocator);
761 
763 void vmaDestroyAllocator(
764  VmaAllocator allocator);
765 
770 void vmaGetPhysicalDeviceProperties(
771  VmaAllocator allocator,
772  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
773 
778 void vmaGetMemoryProperties(
779  VmaAllocator allocator,
780  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
781 
788 void vmaGetMemoryTypeProperties(
789  VmaAllocator allocator,
790  uint32_t memoryTypeIndex,
791  VkMemoryPropertyFlags* pFlags);
792 
801 void vmaSetCurrentFrameIndex(
802  VmaAllocator allocator,
803  uint32_t frameIndex);
804 
807 typedef struct VmaStatInfo
808 {
810  uint32_t blockCount;
812  uint32_t allocationCount;
814  uint32_t unusedRangeCount;
816  VkDeviceSize usedBytes;
818  VkDeviceSize unusedBytes;
819  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
820  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
821 } VmaStatInfo;
822 
824 typedef struct VmaStats
825 {
826  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
827  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
828  VmaStatInfo total;
829 } VmaStats;
830 
832 void vmaCalculateStats(
833  VmaAllocator allocator,
834  VmaStats* pStats);
835 
836 #define VMA_STATS_STRING_ENABLED 1
837 
838 #if VMA_STATS_STRING_ENABLED
839 
841 
843 void vmaBuildStatsString(
844  VmaAllocator allocator,
845  char** ppStatsString,
846  VkBool32 detailedMap);
847 
848 void vmaFreeStatsString(
849  VmaAllocator allocator,
850  char* pStatsString);
851 
852 #endif // #if VMA_STATS_STRING_ENABLED
853 
854 VK_DEFINE_HANDLE(VmaPool)
855 
856 typedef enum VmaMemoryUsage
857 {
862  VMA_MEMORY_USAGE_UNKNOWN = 0,
865  VMA_MEMORY_USAGE_GPU_ONLY = 1,
868  VMA_MEMORY_USAGE_CPU_ONLY = 2,
872  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
887  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
888  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
889 } VmaMemoryUsage;
938 
941 typedef VkFlags VmaAllocationCreateFlags;
942 
943 typedef struct VmaAllocationCreateInfo
944 {
946  VmaAllocationCreateFlags flags;
950  VmaMemoryUsage usage;
957  VkMemoryPropertyFlags requiredFlags;
963  VkMemoryPropertyFlags preferredFlags;
965  void* pUserData;
970  VmaPool pool;
971 } VmaAllocationCreateInfo;
972 
987 VkResult vmaFindMemoryTypeIndex(
988  VmaAllocator allocator,
989  uint32_t memoryTypeBits,
990  const VmaAllocationCreateInfo* pAllocationCreateInfo,
991  uint32_t* pMemoryTypeIndex);
992 
994 typedef enum VmaPoolCreateFlagBits {
1011  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
1012  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1013 } VmaPoolCreateFlagBits;
1014 
1016 typedef VkFlags VmaPoolCreateFlags;
1017 
1020 typedef struct VmaPoolCreateInfo {
1023  uint32_t memoryTypeIndex;
1026  VmaPoolCreateFlags flags;
1031  VkDeviceSize blockSize;
1037  size_t minBlockCount;
1042  size_t maxBlockCount;
1058  uint32_t frameInUseCount;
1059 } VmaPoolCreateInfo;
1060 
1063 typedef struct VmaPoolStats {
1066  VkDeviceSize size;
1069  VkDeviceSize unusedSize;
1082  VkDeviceSize unusedRangeSizeMax;
1083 } VmaPoolStats;
1084 
1091 VkResult vmaCreatePool(
1092  VmaAllocator allocator,
1093  const VmaPoolCreateInfo* pCreateInfo,
1094  VmaPool* pPool);
1095 
1098 void vmaDestroyPool(
1099  VmaAllocator allocator,
1100  VmaPool pool);
1101 
1108 void vmaGetPoolStats(
1109  VmaAllocator allocator,
1110  VmaPool pool,
1111  VmaPoolStats* pPoolStats);
1112 
1119 void vmaMakePoolAllocationsLost(
1120  VmaAllocator allocator,
1121  VmaPool pool,
1122  size_t* pLostAllocationCount);
1123 
1124 VK_DEFINE_HANDLE(VmaAllocation)
1125 
1126 
1128 typedef struct VmaAllocationInfo {
1133  uint32_t memoryType;
1142  VkDeviceMemory deviceMemory;
1147  VkDeviceSize offset;
1152  VkDeviceSize size;
1166  void* pUserData;
1167 } VmaAllocationInfo;
1168 
1179 VkResult vmaAllocateMemory(
1180  VmaAllocator allocator,
1181  const VkMemoryRequirements* pVkMemoryRequirements,
1182  const VmaAllocationCreateInfo* pCreateInfo,
1183  VmaAllocation* pAllocation,
1184  VmaAllocationInfo* pAllocationInfo);
1185 
1192 VkResult vmaAllocateMemoryForBuffer(
1193  VmaAllocator allocator,
1194  VkBuffer buffer,
1195  const VmaAllocationCreateInfo* pCreateInfo,
1196  VmaAllocation* pAllocation,
1197  VmaAllocationInfo* pAllocationInfo);
1198 
1200 VkResult vmaAllocateMemoryForImage(
1201  VmaAllocator allocator,
1202  VkImage image,
1203  const VmaAllocationCreateInfo* pCreateInfo,
1204  VmaAllocation* pAllocation,
1205  VmaAllocationInfo* pAllocationInfo);
1206 
1208 void vmaFreeMemory(
1209  VmaAllocator allocator,
1210  VmaAllocation allocation);
1211 
1213 void vmaGetAllocationInfo(
1214  VmaAllocator allocator,
1215  VmaAllocation allocation,
1216  VmaAllocationInfo* pAllocationInfo);
1217 
1231 void vmaSetAllocationUserData(
1232  VmaAllocator allocator,
1233  VmaAllocation allocation,
1234  void* pUserData);
1235 
1246 void vmaCreateLostAllocation(
1247  VmaAllocator allocator,
1248  VmaAllocation* pAllocation);
1249 
1284 VkResult vmaMapMemory(
1285  VmaAllocator allocator,
1286  VmaAllocation allocation,
1287  void** ppData);
1288 
1293 void vmaUnmapMemory(
1294  VmaAllocator allocator,
1295  VmaAllocation allocation);
1296 
1298 typedef struct VmaDefragmentationInfo {
1303  VkDeviceSize maxBytesToMove;
1307  uint32_t maxAllocationsToMove;
1309 } VmaDefragmentationInfo;
1310 
1312 typedef struct VmaDefragmentationStats {
1314  VkDeviceSize bytesMoved;
1316  VkDeviceSize bytesFreed;
1318  uint32_t allocationsMoved;
1320  uint32_t deviceMemoryBlocksFreed;
1321 } VmaDefragmentationStats;
1322 
1399 VkResult vmaDefragment(
1400  VmaAllocator allocator,
1401  VmaAllocation* pAllocations,
1402  size_t allocationCount,
1403  VkBool32* pAllocationsChanged,
1404  const VmaDefragmentationInfo *pDefragmentationInfo,
1405  VmaDefragmentationStats* pDefragmentationStats);
1406 
1433 VkResult vmaCreateBuffer(
1434  VmaAllocator allocator,
1435  const VkBufferCreateInfo* pBufferCreateInfo,
1436  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1437  VkBuffer* pBuffer,
1438  VmaAllocation* pAllocation,
1439  VmaAllocationInfo* pAllocationInfo);
1440 
1452 void vmaDestroyBuffer(
1453  VmaAllocator allocator,
1454  VkBuffer buffer,
1455  VmaAllocation allocation);
1456 
1458 VkResult vmaCreateImage(
1459  VmaAllocator allocator,
1460  const VkImageCreateInfo* pImageCreateInfo,
1461  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1462  VkImage* pImage,
1463  VmaAllocation* pAllocation,
1464  VmaAllocationInfo* pAllocationInfo);
1465 
1477 void vmaDestroyImage(
1478  VmaAllocator allocator,
1479  VkImage image,
1480  VmaAllocation allocation);
1481 
1482 #ifdef __cplusplus
1483 }
1484 #endif
1485 
1486 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1487 
1488 // For Visual Studio IntelliSense.
1489 #ifdef __INTELLISENSE__
1490 #define VMA_IMPLEMENTATION
1491 #endif
1492 
1493 #ifdef VMA_IMPLEMENTATION
1494 #undef VMA_IMPLEMENTATION
1495 
1496 #include <cstdint>
1497 #include <cstdlib>
1498 #include <cstring>
1499 
1500 /*******************************************************************************
1501 CONFIGURATION SECTION
1502 
1503 Define some of these macros before each #include of this header or change them
1504 here if you need behavior other than the default, depending on your environment.
1505 */
1506 
1507 /*
1508 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1509 internally, like:
1510 
1511  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1512 
1513 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1514 VmaAllocatorCreateInfo::pVulkanFunctions.
1515 */
1516 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
1517 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1518 #endif
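/*
Illustrative sketch (not part of the library): with VMA_STATIC_VULKAN_FUNCTIONS
defined to 0, fill VmaAllocatorCreateInfo::pVulkanFunctions yourself before
calling vmaCreateAllocator, e.g.:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    vulkanFunctions.vkMapMemory = vkMapMemory;
    vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
    vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
    vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
    vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
    vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
    vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
    vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
    vulkanFunctions.vkCreateImage = vkCreateImage;
    vulkanFunctions.vkDestroyImage = vkDestroyImage;
    // Assumption: the *2KHR members may stay null while the
    // VK_KHR_dedicated_allocation extension is not used.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/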
1519 
1520 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1521 //#define VMA_USE_STL_CONTAINERS 1
1522 
1523 /* Set this macro to 1 to make the library include and use STL containers:
1524 std::pair, std::vector, std::list, std::unordered_map.
1525 
1526 Set it to 0 or undefined to make the library use its own implementation of
1527 the containers.
1528 */
1529 #if VMA_USE_STL_CONTAINERS
1530  #define VMA_USE_STL_VECTOR 1
1531  #define VMA_USE_STL_UNORDERED_MAP 1
1532  #define VMA_USE_STL_LIST 1
1533 #endif
1534 
1535 #if VMA_USE_STL_VECTOR
1536  #include <vector>
1537 #endif
1538 
1539 #if VMA_USE_STL_UNORDERED_MAP
1540  #include <unordered_map>
1541 #endif
1542 
1543 #if VMA_USE_STL_LIST
1544  #include <list>
1545 #endif
1546 
1547 /*
1548 The following headers are used in this CONFIGURATION section only, so feel free to
1549 remove them if not needed.
1550 */
1551 #include <cassert> // for assert
1552 #include <algorithm> // for min, max
1553 #include <mutex> // for std::mutex
1554 #include <atomic> // for std::atomic
1555 
1556 #if !defined(_WIN32)
1557  #include <malloc.h> // for aligned_alloc()
1558 #endif
1559 
1560 // Normal assert to check for programmer's errors, especially in Debug configuration.
1561 #ifndef VMA_ASSERT
1562  #ifdef _DEBUG
1563  #define VMA_ASSERT(expr) assert(expr)
1564  #else
1565  #define VMA_ASSERT(expr)
1566  #endif
1567 #endif
1568 
1569 // Assert that will be called very often, like inside data structures e.g. operator[].
1570 // Making it non-empty can make the program slow.
1571 #ifndef VMA_HEAVY_ASSERT
1572  #ifdef _DEBUG
1573  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1574  #else
1575  #define VMA_HEAVY_ASSERT(expr)
1576  #endif
1577 #endif
1578 
1579 #ifndef VMA_NULL
1580  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1581  #define VMA_NULL nullptr
1582 #endif
1583 
1584 #ifndef VMA_ALIGN_OF
1585  #define VMA_ALIGN_OF(type) (__alignof(type))
1586 #endif
1587 
1588 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1589  #if defined(_WIN32)
1590  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1591  #else
1592  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1593  #endif
1594 #endif
1595 
1596 #ifndef VMA_SYSTEM_FREE
1597  #if defined(_WIN32)
1598  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1599  #else
1600  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1601  #endif
1602 #endif
1603 
1604 #ifndef VMA_MIN
1605  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1606 #endif
1607 
1608 #ifndef VMA_MAX
1609  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1610 #endif
1611 
1612 #ifndef VMA_SWAP
1613  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1614 #endif
1615 
1616 #ifndef VMA_SORT
1617  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1618 #endif
1619 
1620 #ifndef VMA_DEBUG_LOG
1621  #define VMA_DEBUG_LOG(format, ...)
1622  /*
1623  #define VMA_DEBUG_LOG(format, ...) do { \
1624  printf(format, __VA_ARGS__); \
1625  printf("\n"); \
1626  } while(false)
1627  */
1628 #endif
1629 
1630 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1631 #if VMA_STATS_STRING_ENABLED
1632  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1633  {
1634  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1635  }
1636  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1637  {
1638  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1639  }
1640  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1641  {
1642  snprintf(outStr, strLen, "%p", ptr);
1643  }
1644 #endif
1645 
1646 #ifndef VMA_MUTEX
1647  class VmaMutex
1648  {
1649  public:
1650  VmaMutex() { }
1651  ~VmaMutex() { }
1652  void Lock() { m_Mutex.lock(); }
1653  void Unlock() { m_Mutex.unlock(); }
1654  private:
1655  std::mutex m_Mutex;
1656  };
1657  #define VMA_MUTEX VmaMutex
1658 #endif
1659 
1660 /*
1661 If providing your own implementation, you need to implement a subset of std::atomic:
1662 
1663 - Constructor(uint32_t desired)
1664 - uint32_t load() const
1665 - void store(uint32_t desired)
1666 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1667 */
1668 #ifndef VMA_ATOMIC_UINT32
1669  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1670 #endif
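/*
Illustrative sketch of a custom replacement (any type providing the four members
listed above works; this one just wraps std::atomic again):

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        {
            return m_Value.compare_exchange_weak(expected, desired);
        }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicUint32
*/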
1671 
1672 #ifndef VMA_BEST_FIT
1673 
1685  #define VMA_BEST_FIT (1)
1686 #endif
1687 
1688 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
1689 
1693  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
1694 #endif
1695 
1696 #ifndef VMA_DEBUG_ALIGNMENT
1697 
1701  #define VMA_DEBUG_ALIGNMENT (1)
1702 #endif
1703 
1704 #ifndef VMA_DEBUG_MARGIN
1705 
1709  #define VMA_DEBUG_MARGIN (0)
1710 #endif
1711 
1712 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1713 
1717  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1718 #endif
1719 
1720 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1721 
1725  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1726 #endif
1727 
1728 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1729  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
1731 #endif
1732 
1733 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1734  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
1736 #endif
1737 
1738 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
1739  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
1741 #endif
1742 
1743 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1744 
1745 /*******************************************************************************
1746 END OF CONFIGURATION
1747 */
1748 
1749 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1750  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1751 
1752 // Returns number of bits set to 1 in (v).
1753 static inline uint32_t CountBitsSet(uint32_t v)
1754 {
1755  uint32_t c = v - ((v >> 1) & 0x55555555);
1756  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
1757  c = ((c >> 4) + c) & 0x0F0F0F0F;
1758  c = ((c >> 8) + c) & 0x00FF00FF;
1759  c = ((c >> 16) + c) & 0x0000FFFF;
1760  return c;
1761 }
1762 
1763 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
1764 // Use types like uint32_t, uint64_t as T.
1765 template <typename T>
1766 static inline T VmaAlignUp(T val, T align)
1767 {
1768  return (val + align - 1) / align * align;
1769 }
1770 
1771 // Division with mathematical rounding to nearest number.
1772 template <typename T>
1773 inline T VmaRoundDiv(T x, T y)
1774 {
1775  return (x + (y / (T)2)) / y;
1776 }
1777 
1778 #ifndef VMA_SORT
1779 
1780 template<typename Iterator, typename Compare>
1781 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
1782 {
1783  Iterator centerValue = end; --centerValue;
1784  Iterator insertIndex = beg;
1785  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
1786  {
1787  if(cmp(*memTypeIndex, *centerValue))
1788  {
1789  if(insertIndex != memTypeIndex)
1790  {
1791  VMA_SWAP(*memTypeIndex, *insertIndex);
1792  }
1793  ++insertIndex;
1794  }
1795  }
1796  if(insertIndex != centerValue)
1797  {
1798  VMA_SWAP(*insertIndex, *centerValue);
1799  }
1800  return insertIndex;
1801 }
1802 
1803 template<typename Iterator, typename Compare>
1804 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1805 {
1806  if(beg < end)
1807  {
1808  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1809  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1810  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1811  }
1812 }
1813 
1814 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1815 
1816 #endif // #ifndef VMA_SORT
1817 
1818 /*
1819 Returns true if two memory blocks occupy overlapping pages.
1820 ResourceA must be at a lower memory offset than ResourceB.
1821 
1822 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1823 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1824 */
1825 static inline bool VmaBlocksOnSamePage(
1826  VkDeviceSize resourceAOffset,
1827  VkDeviceSize resourceASize,
1828  VkDeviceSize resourceBOffset,
1829  VkDeviceSize pageSize)
1830 {
1831  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1832  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1833  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1834  VkDeviceSize resourceBStart = resourceBOffset;
1835  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1836  return resourceAEndPage == resourceBStartPage;
1837 }
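// Example: with pageSize = 1024, a resource at offset 0 with size 1000 ends on page 0
// and a resource starting at offset 1000 starts on page 0 too, so this returns true;
// if the second resource started at offset 1024 instead, it would return false.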
1838 
1839 enum VmaSuballocationType
1840 {
1841  VMA_SUBALLOCATION_TYPE_FREE = 0,
1842  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1843  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1844  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1845  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1846  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1847  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1848 };
1849 
1850 /*
1851 Returns true if given suballocation types could conflict and must respect
1852 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is a
1853 buffer or linear image and the other one is an optimal image. If a type is
1854 unknown, behave conservatively.
1855 */
1856 static inline bool VmaIsBufferImageGranularityConflict(
1857  VmaSuballocationType suballocType1,
1858  VmaSuballocationType suballocType2)
1859 {
1860  if(suballocType1 > suballocType2)
1861  {
1862  VMA_SWAP(suballocType1, suballocType2);
1863  }
1864 
1865  switch(suballocType1)
1866  {
1867  case VMA_SUBALLOCATION_TYPE_FREE:
1868  return false;
1869  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1870  return true;
1871  case VMA_SUBALLOCATION_TYPE_BUFFER:
1872  return
1873  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1874  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1875  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1876  return
1877  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1878  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1879  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1880  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1881  return
1882  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1883  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1884  return false;
1885  default:
1886  VMA_ASSERT(0);
1887  return true;
1888  }
1889 }
1890 
1891 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1892 struct VmaMutexLock
1893 {
1894 public:
1895  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1896  m_pMutex(useMutex ? &mutex : VMA_NULL)
1897  {
1898  if(m_pMutex)
1899  {
1900  m_pMutex->Lock();
1901  }
1902  }
1903 
1904  ~VmaMutexLock()
1905  {
1906  if(m_pMutex)
1907  {
1908  m_pMutex->Unlock();
1909  }
1910  }
1911 
1912 private:
1913  VMA_MUTEX* m_pMutex;
1914 };
1915 
1916 #if VMA_DEBUG_GLOBAL_MUTEX
1917  static VMA_MUTEX gDebugGlobalMutex;
1918  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
1919 #else
1920  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1921 #endif
1922 
1923 // Minimum size of a free suballocation to register it in the free suballocation collection.
1924 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1925 
1926 /*
1927 Performs binary search and returns an iterator to the first element that is
1928 greater than or equal to (key), according to comparison (cmp).
1929 
1930 Cmp should return true if its first argument is less than its second argument.
1931 
1932 The returned value is the found element, if present in the collection, or the
1933 place where a new element with value (key) should be inserted.
1934 */
1935 template <typename IterT, typename KeyT, typename CmpT>
1936 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1937 {
1938  size_t down = 0, up = (end - beg);
1939  while(down < up)
1940  {
1941  const size_t mid = (down + up) / 2;
1942  if(cmp(*(beg+mid), key))
1943  {
1944  down = mid + 1;
1945  }
1946  else
1947  {
1948  up = mid;
1949  }
1950  }
1951  return beg + down;
1952 }
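// Usage sketch (illustrative only): find the insertion point for 7 in a sorted array.
//
//     const int arr[] = { 1, 3, 5, 8, 9 };
//     const int* it = VmaBinaryFindFirstNotLess(
//         arr, arr + 5, 7, [](int a, int b) { return a < b; });
//     // Now it - arr == 3: it points to 8, the first element not less than 7.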
1953 
1955 // Memory allocation
1956 
1957 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1958 {
1959  if((pAllocationCallbacks != VMA_NULL) &&
1960  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1961  {
1962  return (*pAllocationCallbacks->pfnAllocation)(
1963  pAllocationCallbacks->pUserData,
1964  size,
1965  alignment,
1966  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1967  }
1968  else
1969  {
1970  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1971  }
1972 }
1973 
1974 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1975 {
1976  if((pAllocationCallbacks != VMA_NULL) &&
1977  (pAllocationCallbacks->pfnFree != VMA_NULL))
1978  {
1979  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1980  }
1981  else
1982  {
1983  VMA_SYSTEM_FREE(ptr);
1984  }
1985 }
1986 
1987 template<typename T>
1988 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1989 {
1990  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1991 }
1992 
1993 template<typename T>
1994 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1995 {
1996  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1997 }
1998 
1999 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2000 
2001 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2002 
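// Usage sketch (Foo and its constructor arguments are hypothetical): vma_new
// combines VmaAllocate with placement new, so construction and destruction go
// through the user-provided callbacks.
//
//     Foo* pFoo = vma_new(pAllocationCallbacks, Foo)(arg1, arg2);
//     vma_delete(pAllocationCallbacks, pFoo);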
2003 template<typename T>
2004 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2005 {
2006  ptr->~T();
2007  VmaFree(pAllocationCallbacks, ptr);
2008 }
2009 
2010 template<typename T>
2011 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2012 {
2013  if(ptr != VMA_NULL)
2014  {
2015  for(size_t i = count; i--; )
2016  {
2017  ptr[i].~T();
2018  }
2019  VmaFree(pAllocationCallbacks, ptr);
2020  }
2021 }
2022 
2023 // STL-compatible allocator.
2024 template<typename T>
2025 class VmaStlAllocator
2026 {
2027 public:
2028  const VkAllocationCallbacks* const m_pCallbacks;
2029  typedef T value_type;
2030 
2031  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2032  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2033 
2034  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2035  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2036 
2037  template<typename U>
2038  bool operator==(const VmaStlAllocator<U>& rhs) const
2039  {
2040  return m_pCallbacks == rhs.m_pCallbacks;
2041  }
2042  template<typename U>
2043  bool operator!=(const VmaStlAllocator<U>& rhs) const
2044  {
2045  return m_pCallbacks != rhs.m_pCallbacks;
2046  }
2047 
2048  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2049 };
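// Usage sketch: this is the pattern used later for m_ItemBlocks, e.g.:
//
//     VmaVector< int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(pCallbacks));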
2050 
2051 #if VMA_USE_STL_VECTOR
2052 
2053 #define VmaVector std::vector
2054 
2055 template<typename T, typename allocatorT>
2056 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2057 {
2058  vec.insert(vec.begin() + index, item);
2059 }
2060 
2061 template<typename T, typename allocatorT>
2062 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2063 {
2064  vec.erase(vec.begin() + index);
2065 }
2066 
2067 #else // #if VMA_USE_STL_VECTOR
2068 
2069 /* Class with an interface compatible with a subset of std::vector.
2070 T must be POD because constructors and destructors are not called and memcpy is
2071 used for these objects. */
2072 template<typename T, typename AllocatorT>
2073 class VmaVector
2074 {
2075 public:
2076  typedef T value_type;
2077 
2078  VmaVector(const AllocatorT& allocator) :
2079  m_Allocator(allocator),
2080  m_pArray(VMA_NULL),
2081  m_Count(0),
2082  m_Capacity(0)
2083  {
2084  }
2085 
2086  VmaVector(size_t count, const AllocatorT& allocator) :
2087  m_Allocator(allocator),
2088  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2089  m_Count(count),
2090  m_Capacity(count)
2091  {
2092  }
2093 
2094  VmaVector(const VmaVector<T, AllocatorT>& src) :
2095  m_Allocator(src.m_Allocator),
2096  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2097  m_Count(src.m_Count),
2098  m_Capacity(src.m_Count)
2099  {
2100  if(m_Count != 0)
2101  {
2102  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2103  }
2104  }
2105 
2106  ~VmaVector()
2107  {
2108  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2109  }
2110 
2111  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2112  {
2113  if(&rhs != this)
2114  {
2115  resize(rhs.m_Count);
2116  if(m_Count != 0)
2117  {
2118  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2119  }
2120  }
2121  return *this;
2122  }
2123 
2124  bool empty() const { return m_Count == 0; }
2125  size_t size() const { return m_Count; }
2126  T* data() { return m_pArray; }
2127  const T* data() const { return m_pArray; }
2128 
2129  T& operator[](size_t index)
2130  {
2131  VMA_HEAVY_ASSERT(index < m_Count);
2132  return m_pArray[index];
2133  }
2134  const T& operator[](size_t index) const
2135  {
2136  VMA_HEAVY_ASSERT(index < m_Count);
2137  return m_pArray[index];
2138  }
2139 
2140  T& front()
2141  {
2142  VMA_HEAVY_ASSERT(m_Count > 0);
2143  return m_pArray[0];
2144  }
2145  const T& front() const
2146  {
2147  VMA_HEAVY_ASSERT(m_Count > 0);
2148  return m_pArray[0];
2149  }
2150  T& back()
2151  {
2152  VMA_HEAVY_ASSERT(m_Count > 0);
2153  return m_pArray[m_Count - 1];
2154  }
2155  const T& back() const
2156  {
2157  VMA_HEAVY_ASSERT(m_Count > 0);
2158  return m_pArray[m_Count - 1];
2159  }
2160 
2161  void reserve(size_t newCapacity, bool freeMemory = false)
2162  {
2163  newCapacity = VMA_MAX(newCapacity, m_Count);
2164 
2165  if((newCapacity < m_Capacity) && !freeMemory)
2166  {
2167  newCapacity = m_Capacity;
2168  }
2169 
2170  if(newCapacity != m_Capacity)
2171  {
2172  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2173  if(m_Count != 0)
2174  {
2175  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2176  }
2177  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2178  m_Capacity = newCapacity;
2179  m_pArray = newArray;
2180  }
2181  }
2182 
2183  void resize(size_t newCount, bool freeMemory = false)
2184  {
2185  size_t newCapacity = m_Capacity;
2186  if(newCount > m_Capacity)
2187  {
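  // Grow geometrically by a factor of ~1.5 (and to at least 8 elements) to amortize reallocation cost.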
2188  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2189  }
2190  else if(freeMemory)
2191  {
2192  newCapacity = newCount;
2193  }
2194 
2195  if(newCapacity != m_Capacity)
2196  {
2197  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2198  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2199  if(elementsToCopy != 0)
2200  {
2201  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2202  }
2203  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2204  m_Capacity = newCapacity;
2205  m_pArray = newArray;
2206  }
2207 
2208  m_Count = newCount;
2209  }
2210 
2211  void clear(bool freeMemory = false)
2212  {
2213  resize(0, freeMemory);
2214  }
2215 
2216  void insert(size_t index, const T& src)
2217  {
2218  VMA_HEAVY_ASSERT(index <= m_Count);
2219  const size_t oldCount = size();
2220  resize(oldCount + 1);
2221  if(index < oldCount)
2222  {
2223  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2224  }
2225  m_pArray[index] = src;
2226  }
2227 
2228  void remove(size_t index)
2229  {
2230  VMA_HEAVY_ASSERT(index < m_Count);
2231  const size_t oldCount = size();
2232  if(index < oldCount - 1)
2233  {
2234  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2235  }
2236  resize(oldCount - 1);
2237  }
2238 
2239  void push_back(const T& src)
2240  {
2241  const size_t newIndex = size();
2242  resize(newIndex + 1);
2243  m_pArray[newIndex] = src;
2244  }
2245 
2246  void pop_back()
2247  {
2248  VMA_HEAVY_ASSERT(m_Count > 0);
2249  resize(size() - 1);
2250  }
2251 
2252  void push_front(const T& src)
2253  {
2254  insert(0, src);
2255  }
2256 
2257  void pop_front()
2258  {
2259  VMA_HEAVY_ASSERT(m_Count > 0);
2260  remove(0);
2261  }
2262 
2263  typedef T* iterator;
2264 
2265  iterator begin() { return m_pArray; }
2266  iterator end() { return m_pArray + m_Count; }
2267 
2268 private:
2269  AllocatorT m_Allocator;
2270  T* m_pArray;
2271  size_t m_Count;
2272  size_t m_Capacity;
2273 };
2274 
2275 template<typename T, typename allocatorT>
2276 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2277 {
2278  vec.insert(index, item);
2279 }
2280 
2281 template<typename T, typename allocatorT>
2282 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2283 {
2284  vec.remove(index);
2285 }
2286 
2287 #endif // #if VMA_USE_STL_VECTOR
2288 
2289 template<typename CmpLess, typename VectorT>
2290 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2291 {
2292  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2293  vector.data(),
2294  vector.data() + vector.size(),
2295  value,
2296  CmpLess()) - vector.data();
2297  VmaVectorInsert(vector, indexToInsert, value);
2298  return indexToInsert;
2299 }
2300 
2301 template<typename CmpLess, typename VectorT>
2302 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2303 {
2304  CmpLess comparator;
2305  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2306  vector.begin(),
2307  vector.end(),
2308  value,
2309  comparator);
2310  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2311  {
2312  size_t indexToRemove = it - vector.begin();
2313  VmaVectorRemove(vector, indexToRemove);
2314  return true;
2315  }
2316  return false;
2317 }
2318 
2319 template<typename CmpLess, typename VectorT>
2320 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2321 {
2322  CmpLess comparator;
2323  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2324  vector.data(),
2325  vector.data() + vector.size(),
2326  value,
2327  comparator);
2328  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2329  {
2330  return it - vector.data();
2331  }
2332  else
2333  {
2334  return vector.size();
2335  }
2336 }
2337 
2339 // class VmaPoolAllocator
2340 
2341 /*
2342 Allocator for objects of type T using a list of arrays (pools) to speed up
2343 allocation. The number of elements that can be allocated is not bounded because
2344 the allocator can create multiple blocks.
2345 */
2346 template<typename T>
2347 class VmaPoolAllocator
2348 {
2349 public:
2350  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2351  ~VmaPoolAllocator();
2352  void Clear();
2353  T* Alloc();
2354  void Free(T* ptr);
2355 
2356 private:
2357  union Item
2358  {
2359  uint32_t NextFreeIndex;
2360  T Value;
2361  };
2362 
2363  struct ItemBlock
2364  {
2365  Item* pItems;
2366  uint32_t FirstFreeIndex;
2367  };
2368 
2369  const VkAllocationCallbacks* m_pAllocationCallbacks;
2370  size_t m_ItemsPerBlock;
2371  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2372 
2373  ItemBlock& CreateNewBlock();
2374 };
2375 
2376 template<typename T>
2377 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2378  m_pAllocationCallbacks(pAllocationCallbacks),
2379  m_ItemsPerBlock(itemsPerBlock),
2380  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2381 {
2382  VMA_ASSERT(itemsPerBlock > 0);
2383 }
2384 
2385 template<typename T>
2386 VmaPoolAllocator<T>::~VmaPoolAllocator()
2387 {
2388  Clear();
2389 }
2390 
2391 template<typename T>
2392 void VmaPoolAllocator<T>::Clear()
2393 {
2394  for(size_t i = m_ItemBlocks.size(); i--; )
2395  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2396  m_ItemBlocks.clear();
2397 }
2398 
2399 template<typename T>
2400 T* VmaPoolAllocator<T>::Alloc()
2401 {
2402  for(size_t i = m_ItemBlocks.size(); i--; )
2403  {
2404  ItemBlock& block = m_ItemBlocks[i];
2405  // This block has some free items: Use first one.
2406  if(block.FirstFreeIndex != UINT32_MAX)
2407  {
2408  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2409  block.FirstFreeIndex = pItem->NextFreeIndex;
2410  return &pItem->Value;
2411  }
2412  }
2413 
2414  // No block has free item: Create new one and use it.
2415  ItemBlock& newBlock = CreateNewBlock();
2416  Item* const pItem = &newBlock.pItems[0];
2417  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2418  return &pItem->Value;
2419 }
2420 
2421 template<typename T>
2422 void VmaPoolAllocator<T>::Free(T* ptr)
2423 {
2424  // Search all memory blocks to find ptr.
2425  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2426  {
2427  ItemBlock& block = m_ItemBlocks[i];
2428 
2429  // Casting to union.
2430  Item* pItemPtr;
2431  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2432 
2433  // Check if pItemPtr is in address range of this block.
2434  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2435  {
2436  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2437  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2438  block.FirstFreeIndex = index;
2439  return;
2440  }
2441  }
2442  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2443 }
2444 
2445 template<typename T>
2446 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2447 {
2448  ItemBlock newBlock = {
2449  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2450 
2451  m_ItemBlocks.push_back(newBlock);
2452 
2453  // Set up a singly-linked list of all free items in this block.
2454  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2455  newBlock.pItems[i].NextFreeIndex = i + 1;
2456  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2457  return m_ItemBlocks.back();
2458 }
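// Design note: each ItemBlock threads a singly-linked free list through its unused
// Items (via NextFreeIndex), so Alloc() and Free() need no extra bookkeeping memory
// and operate in constant time once the right block is found.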
2459 
2461 // class VmaRawList, VmaList
2462 
2463 #if VMA_USE_STL_LIST
2464 
2465 #define VmaList std::list
2466 
2467 #else // #if VMA_USE_STL_LIST
2468 
2469 template<typename T>
2470 struct VmaListItem
2471 {
2472  VmaListItem* pPrev;
2473  VmaListItem* pNext;
2474  T Value;
2475 };
2476 
2477 // Doubly linked list.
2478 template<typename T>
2479 class VmaRawList
2480 {
2481 public:
2482  typedef VmaListItem<T> ItemType;
2483 
2484  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2485  ~VmaRawList();
2486  void Clear();
2487 
2488  size_t GetCount() const { return m_Count; }
2489  bool IsEmpty() const { return m_Count == 0; }
2490 
2491  ItemType* Front() { return m_pFront; }
2492  const ItemType* Front() const { return m_pFront; }
2493  ItemType* Back() { return m_pBack; }
2494  const ItemType* Back() const { return m_pBack; }
2495 
2496  ItemType* PushBack();
2497  ItemType* PushFront();
2498  ItemType* PushBack(const T& value);
2499  ItemType* PushFront(const T& value);
2500  void PopBack();
2501  void PopFront();
2502 
2503  // Item can be null - it means PushBack.
2504  ItemType* InsertBefore(ItemType* pItem);
2505  // Item can be null - it means PushFront.
2506  ItemType* InsertAfter(ItemType* pItem);
2507 
2508  ItemType* InsertBefore(ItemType* pItem, const T& value);
2509  ItemType* InsertAfter(ItemType* pItem, const T& value);
2510 
2511  void Remove(ItemType* pItem);
2512 
2513 private:
2514  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2515  VmaPoolAllocator<ItemType> m_ItemAllocator;
2516  ItemType* m_pFront;
2517  ItemType* m_pBack;
2518  size_t m_Count;
2519 
2520  // Declared but not defined, to block the copy constructor and assignment operator.
2521  VmaRawList(const VmaRawList<T>& src);
2522  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2523 };
2524 
2525 template<typename T>
2526 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2527  m_pAllocationCallbacks(pAllocationCallbacks),
2528  m_ItemAllocator(pAllocationCallbacks, 128),
2529  m_pFront(VMA_NULL),
2530  m_pBack(VMA_NULL),
2531  m_Count(0)
2532 {
2533 }
2534 
2535 template<typename T>
2536 VmaRawList<T>::~VmaRawList()
2537 {
2538  // Intentionally not calling Clear, because that would waste computation
2539  // returning all items to m_ItemAllocator as free.
2540 }
2541 
2542 template<typename T>
2543 void VmaRawList<T>::Clear()
2544 {
2545  if(IsEmpty() == false)
2546  {
2547  ItemType* pItem = m_pBack;
2548  while(pItem != VMA_NULL)
2549  {
2550  ItemType* const pPrevItem = pItem->pPrev;
2551  m_ItemAllocator.Free(pItem);
2552  pItem = pPrevItem;
2553  }
2554  m_pFront = VMA_NULL;
2555  m_pBack = VMA_NULL;
2556  m_Count = 0;
2557  }
2558 }
2559 
2560 template<typename T>
2561 VmaListItem<T>* VmaRawList<T>::PushBack()
2562 {
2563  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2564  pNewItem->pNext = VMA_NULL;
2565  if(IsEmpty())
2566  {
2567  pNewItem->pPrev = VMA_NULL;
2568  m_pFront = pNewItem;
2569  m_pBack = pNewItem;
2570  m_Count = 1;
2571  }
2572  else
2573  {
2574  pNewItem->pPrev = m_pBack;
2575  m_pBack->pNext = pNewItem;
2576  m_pBack = pNewItem;
2577  ++m_Count;
2578  }
2579  return pNewItem;
2580 }
2581 
2582 template<typename T>
2583 VmaListItem<T>* VmaRawList<T>::PushFront()
2584 {
2585  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2586  pNewItem->pPrev = VMA_NULL;
2587  if(IsEmpty())
2588  {
2589  pNewItem->pNext = VMA_NULL;
2590  m_pFront = pNewItem;
2591  m_pBack = pNewItem;
2592  m_Count = 1;
2593  }
2594  else
2595  {
2596  pNewItem->pNext = m_pFront;
2597  m_pFront->pPrev = pNewItem;
2598  m_pFront = pNewItem;
2599  ++m_Count;
2600  }
2601  return pNewItem;
2602 }
2603 
2604 template<typename T>
2605 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2606 {
2607  ItemType* const pNewItem = PushBack();
2608  pNewItem->Value = value;
2609  return pNewItem;
2610 }
2611 
2612 template<typename T>
2613 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2614 {
2615  ItemType* const pNewItem = PushFront();
2616  pNewItem->Value = value;
2617  return pNewItem;
2618 }
2619 
2620 template<typename T>
2621 void VmaRawList<T>::PopBack()
2622 {
2623  VMA_HEAVY_ASSERT(m_Count > 0);
2624  ItemType* const pBackItem = m_pBack;
2625  ItemType* const pPrevItem = pBackItem->pPrev;
2626  if(pPrevItem != VMA_NULL)
2627  {
2628  pPrevItem->pNext = VMA_NULL;
2629  }
2630  m_pBack = pPrevItem;
2631  m_ItemAllocator.Free(pBackItem);
2632  --m_Count;
2633 }
2634 
2635 template<typename T>
2636 void VmaRawList<T>::PopFront()
2637 {
2638  VMA_HEAVY_ASSERT(m_Count > 0);
2639  ItemType* const pFrontItem = m_pFront;
2640  ItemType* const pNextItem = pFrontItem->pNext;
2641  if(pNextItem != VMA_NULL)
2642  {
2643  pNextItem->pPrev = VMA_NULL;
2644  }
2645  m_pFront = pNextItem;
2646  m_ItemAllocator.Free(pFrontItem);
2647  --m_Count;
2648 }
2649 
2650 template<typename T>
2651 void VmaRawList<T>::Remove(ItemType* pItem)
2652 {
2653  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2654  VMA_HEAVY_ASSERT(m_Count > 0);
2655 
2656  if(pItem->pPrev != VMA_NULL)
2657  {
2658  pItem->pPrev->pNext = pItem->pNext;
2659  }
2660  else
2661  {
2662  VMA_HEAVY_ASSERT(m_pFront == pItem);
2663  m_pFront = pItem->pNext;
2664  }
2665 
2666  if(pItem->pNext != VMA_NULL)
2667  {
2668  pItem->pNext->pPrev = pItem->pPrev;
2669  }
2670  else
2671  {
2672  VMA_HEAVY_ASSERT(m_pBack == pItem);
2673  m_pBack = pItem->pPrev;
2674  }
2675 
2676  m_ItemAllocator.Free(pItem);
2677  --m_Count;
2678 }
2679 
2680 template<typename T>
2681 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2682 {
2683  if(pItem != VMA_NULL)
2684  {
2685  ItemType* const prevItem = pItem->pPrev;
2686  ItemType* const newItem = m_ItemAllocator.Alloc();
2687  newItem->pPrev = prevItem;
2688  newItem->pNext = pItem;
2689  pItem->pPrev = newItem;
2690  if(prevItem != VMA_NULL)
2691  {
2692  prevItem->pNext = newItem;
2693  }
2694  else
2695  {
2696  VMA_HEAVY_ASSERT(m_pFront == pItem);
2697  m_pFront = newItem;
2698  }
2699  ++m_Count;
2700  return newItem;
2701  }
2702  else
2703  return PushBack();
2704 }
2705 
2706 template<typename T>
2707 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2708 {
2709  if(pItem != VMA_NULL)
2710  {
2711  ItemType* const nextItem = pItem->pNext;
2712  ItemType* const newItem = m_ItemAllocator.Alloc();
2713  newItem->pNext = nextItem;
2714  newItem->pPrev = pItem;
2715  pItem->pNext = newItem;
2716  if(nextItem != VMA_NULL)
2717  {
2718  nextItem->pPrev = newItem;
2719  }
2720  else
2721  {
2722  VMA_HEAVY_ASSERT(m_pBack == pItem);
2723  m_pBack = newItem;
2724  }
2725  ++m_Count;
2726  return newItem;
2727  }
2728  else
2729  return PushFront();
2730 }
2731 
2732 template<typename T>
2733 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2734 {
2735  ItemType* const newItem = InsertBefore(pItem);
2736  newItem->Value = value;
2737  return newItem;
2738 }
2739 
2740 template<typename T>
2741 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2742 {
2743  ItemType* const newItem = InsertAfter(pItem);
2744  newItem->Value = value;
2745  return newItem;
2746 }
2747 
2748 template<typename T, typename AllocatorT>
2749 class VmaList
2750 {
2751 public:
2752  class iterator
2753  {
2754  public:
2755  iterator() :
2756  m_pList(VMA_NULL),
2757  m_pItem(VMA_NULL)
2758  {
2759  }
2760 
2761  T& operator*() const
2762  {
2763  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2764  return m_pItem->Value;
2765  }
2766  T* operator->() const
2767  {
2768  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2769  return &m_pItem->Value;
2770  }
2771 
2772  iterator& operator++()
2773  {
2774  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2775  m_pItem = m_pItem->pNext;
2776  return *this;
2777  }
2778  iterator& operator--()
2779  {
2780  if(m_pItem != VMA_NULL)
2781  {
2782  m_pItem = m_pItem->pPrev;
2783  }
2784  else
2785  {
2786  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2787  m_pItem = m_pList->Back();
2788  }
2789  return *this;
2790  }
2791 
2792  iterator operator++(int)
2793  {
2794  iterator result = *this;
2795  ++*this;
2796  return result;
2797  }
2798  iterator operator--(int)
2799  {
2800  iterator result = *this;
2801  --*this;
2802  return result;
2803  }
2804 
2805  bool operator==(const iterator& rhs) const
2806  {
2807  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2808  return m_pItem == rhs.m_pItem;
2809  }
2810  bool operator!=(const iterator& rhs) const
2811  {
2812  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2813  return m_pItem != rhs.m_pItem;
2814  }
2815 
2816  private:
2817  VmaRawList<T>* m_pList;
2818  VmaListItem<T>* m_pItem;
2819 
2820  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2821  m_pList(pList),
2822  m_pItem(pItem)
2823  {
2824  }
2825 
2826  friend class VmaList<T, AllocatorT>;
2827  };
2828 
2829  class const_iterator
2830  {
2831  public:
2832  const_iterator() :
2833  m_pList(VMA_NULL),
2834  m_pItem(VMA_NULL)
2835  {
2836  }
2837 
2838  const_iterator(const iterator& src) :
2839  m_pList(src.m_pList),
2840  m_pItem(src.m_pItem)
2841  {
2842  }
2843 
2844  const T& operator*() const
2845  {
2846  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2847  return m_pItem->Value;
2848  }
2849  const T* operator->() const
2850  {
2851  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2852  return &m_pItem->Value;
2853  }
2854 
2855  const_iterator& operator++()
2856  {
2857  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2858  m_pItem = m_pItem->pNext;
2859  return *this;
2860  }
2861  const_iterator& operator--()
2862  {
2863  if(m_pItem != VMA_NULL)
2864  {
2865  m_pItem = m_pItem->pPrev;
2866  }
2867  else
2868  {
2869  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2870  m_pItem = m_pList->Back();
2871  }
2872  return *this;
2873  }
2874 
2875  const_iterator operator++(int)
2876  {
2877  const_iterator result = *this;
2878  ++*this;
2879  return result;
2880  }
2881  const_iterator operator--(int)
2882  {
2883  const_iterator result = *this;
2884  --*this;
2885  return result;
2886  }
2887 
2888  bool operator==(const const_iterator& rhs) const
2889  {
2890  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2891  return m_pItem == rhs.m_pItem;
2892  }
2893  bool operator!=(const const_iterator& rhs) const
2894  {
2895  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2896  return m_pItem != rhs.m_pItem;
2897  }
2898 
2899  private:
2900  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2901  m_pList(pList),
2902  m_pItem(pItem)
2903  {
2904  }
2905 
2906  const VmaRawList<T>* m_pList;
2907  const VmaListItem<T>* m_pItem;
2908 
2909  friend class VmaList<T, AllocatorT>;
2910  };
2911 
2912  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2913 
2914  bool empty() const { return m_RawList.IsEmpty(); }
2915  size_t size() const { return m_RawList.GetCount(); }
2916 
2917  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2918  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2919 
2920  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2921  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2922 
2923  void clear() { m_RawList.Clear(); }
2924  void push_back(const T& value) { m_RawList.PushBack(value); }
2925  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2926  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2927 
2928 private:
2929  VmaRawList<T> m_RawList;
2930 };
2931 
2932 #endif // #if VMA_USE_STL_LIST
2933 
2935 // class VmaMap
2936 
2937 // Unused in this version.
2938 #if 0
2939 
2940 #if VMA_USE_STL_UNORDERED_MAP
2941 
2942 #define VmaPair std::pair
2943 
2944 #define VMA_MAP_TYPE(KeyT, ValueT) \
2945  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2946 
2947 #else // #if VMA_USE_STL_UNORDERED_MAP
2948 
2949 template<typename T1, typename T2>
2950 struct VmaPair
2951 {
2952  T1 first;
2953  T2 second;
2954 
2955  VmaPair() : first(), second() { }
2956  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2957 };
2958 
2959 /* Class compatible with a subset of the interface of std::unordered_map.
2960 KeyT, ValueT must be POD because they will be stored in VmaVector.
2961 */
2962 template<typename KeyT, typename ValueT>
2963 class VmaMap
2964 {
2965 public:
2966  typedef VmaPair<KeyT, ValueT> PairType;
2967  typedef PairType* iterator;
2968 
2969  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2970 
2971  iterator begin() { return m_Vector.begin(); }
2972  iterator end() { return m_Vector.end(); }
2973 
2974  void insert(const PairType& pair);
2975  iterator find(const KeyT& key);
2976  void erase(iterator it);
2977 
2978 private:
2979  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2980 };
2981 
2982 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2983 
2984 template<typename FirstT, typename SecondT>
2985 struct VmaPairFirstLess
2986 {
2987  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2988  {
2989  return lhs.first < rhs.first;
2990  }
2991  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2992  {
2993  return lhs.first < rhsFirst;
2994  }
2995 };
2996 
2997 template<typename KeyT, typename ValueT>
2998 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
2999 {
3000  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3001  m_Vector.data(),
3002  m_Vector.data() + m_Vector.size(),
3003  pair,
3004  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3005  VmaVectorInsert(m_Vector, indexToInsert, pair);
3006 }
3007 
3008 template<typename KeyT, typename ValueT>
3009 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3010 {
3011  PairType* it = VmaBinaryFindFirstNotLess(
3012  m_Vector.data(),
3013  m_Vector.data() + m_Vector.size(),
3014  key,
3015  VmaPairFirstLess<KeyT, ValueT>());
3016  if((it != m_Vector.end()) && (it->first == key))
3017  {
3018  return it;
3019  }
3020  else
3021  {
3022  return m_Vector.end();
3023  }
3024 }
3025 
3026 template<typename KeyT, typename ValueT>
3027 void VmaMap<KeyT, ValueT>::erase(iterator it)
3028 {
3029  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3030 }
3031 
3032 #endif // #if VMA_USE_STL_UNORDERED_MAP
3033 
3034 #endif // #if 0
3035 
3037 
3038 class VmaDeviceMemoryBlock;
3039 
3040 struct VmaAllocation_T
3041 {
3042 private:
3043  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3044 
3045  enum FLAGS
3046  {
3047  FLAG_USER_DATA_STRING = 0x01,
3048  };
3049 
3050 public:
3051  enum ALLOCATION_TYPE
3052  {
3053  ALLOCATION_TYPE_NONE,
3054  ALLOCATION_TYPE_BLOCK,
3055  ALLOCATION_TYPE_DEDICATED,
3056  };
3057 
3058  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3059  m_Alignment(1),
3060  m_Size(0),
3061  m_pUserData(VMA_NULL),
3062  m_LastUseFrameIndex(currentFrameIndex),
3063  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3064  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3065  m_MapCount(0),
3066  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3067  {
3068  }
3069 
3070  ~VmaAllocation_T()
3071  {
3072  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3073 
3074  // Check if owned string was freed.
3075  VMA_ASSERT(m_pUserData == VMA_NULL);
3076  }
3077 
3078  void InitBlockAllocation(
3079  VmaPool hPool,
3080  VmaDeviceMemoryBlock* block,
3081  VkDeviceSize offset,
3082  VkDeviceSize alignment,
3083  VkDeviceSize size,
3084  VmaSuballocationType suballocationType,
3085  bool mapped,
3086  bool canBecomeLost)
3087  {
3088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3089  VMA_ASSERT(block != VMA_NULL);
3090  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3091  m_Alignment = alignment;
3092  m_Size = size;
3093  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3094  m_SuballocationType = (uint8_t)suballocationType;
3095  m_BlockAllocation.m_hPool = hPool;
3096  m_BlockAllocation.m_Block = block;
3097  m_BlockAllocation.m_Offset = offset;
3098  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3099  }
3100 
3101  void InitLost()
3102  {
3103  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3104  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3105  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3106  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3107  m_BlockAllocation.m_Block = VMA_NULL;
3108  m_BlockAllocation.m_Offset = 0;
3109  m_BlockAllocation.m_CanBecomeLost = true;
3110  }
3111 
3112  void ChangeBlockAllocation(
3113  VmaDeviceMemoryBlock* block,
3114  VkDeviceSize offset)
3115  {
3116  VMA_ASSERT(block != VMA_NULL);
3117  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3118  m_BlockAllocation.m_Block = block;
3119  m_BlockAllocation.m_Offset = offset;
3120  }
3121 
3122  // pMappedData not null means allocation is created with MAPPED flag.
3123  void InitDedicatedAllocation(
3124  uint32_t memoryTypeIndex,
3125  VkDeviceMemory hMemory,
3126  VmaSuballocationType suballocationType,
3127  void* pMappedData,
3128  VkDeviceSize size)
3129  {
3130  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3131  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3132  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3133  m_Alignment = 0;
3134  m_Size = size;
3135  m_SuballocationType = (uint8_t)suballocationType;
3136  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3137  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3138  m_DedicatedAllocation.m_hMemory = hMemory;
3139  m_DedicatedAllocation.m_pMappedData = pMappedData;
3140  }
3141 
3142  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3143  VkDeviceSize GetAlignment() const { return m_Alignment; }
3144  VkDeviceSize GetSize() const { return m_Size; }
3145  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3146  void* GetUserData() const { return m_pUserData; }
3147  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3148  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3149 
3150  VmaDeviceMemoryBlock* GetBlock() const
3151  {
3152  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3153  return m_BlockAllocation.m_Block;
3154  }
3155  VkDeviceSize GetOffset() const;
3156  VkDeviceMemory GetMemory() const;
3157  uint32_t GetMemoryTypeIndex() const;
3158  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3159  void* GetMappedData() const;
3160  bool CanBecomeLost() const;
3161  VmaPool GetPool() const;
3162 
3163  uint32_t GetLastUseFrameIndex() const
3164  {
3165  return m_LastUseFrameIndex.load();
3166  }
3167  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3168  {
3169  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3170  }
3171  /*
3172  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3173  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3174  - Else, returns false.
3175 
3176  If hAllocation is already lost, assert - you should not call it then.
3177  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3178  */
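  // Example: with frameInUseCount = 2, an allocation whose LastUseFrameIndex is 10
  // can be made lost only once allocator.CurrentFrameIndex reaches 13 (10 + 2 < 13).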
3179  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3180 
3181  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3182  {
3183  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3184  outInfo.blockCount = 1;
3185  outInfo.allocationCount = 1;
3186  outInfo.unusedRangeCount = 0;
3187  outInfo.usedBytes = m_Size;
3188  outInfo.unusedBytes = 0;
3189  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3190  outInfo.unusedRangeSizeMin = UINT64_MAX;
3191  outInfo.unusedRangeSizeMax = 0;
3192  }
3193 
3194  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3195  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3196 
3197 private:
3198  VkDeviceSize m_Alignment;
3199  VkDeviceSize m_Size;
3200  void* m_pUserData;
3201  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3202  uint8_t m_Type; // ALLOCATION_TYPE
3203  uint8_t m_SuballocationType; // VmaSuballocationType
3204  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3205  // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are reference counter for vmaMapMemory()/vmaUnmapMemory().
3206  uint8_t m_MapCount;
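 // Example: a persistently mapped dedicated allocation that has additionally
 // been mapped twice via vmaMapMemory() holds m_MapCount == (0x80 | 2) == 0x82;
 // (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 2 is the user map count.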
3207  uint8_t m_Flags; // enum FLAGS
3208 
3209  // Allocation out of VmaDeviceMemoryBlock.
3210  struct BlockAllocation
3211  {
3212  VmaPool m_hPool; // Null if belongs to general memory.
3213  VmaDeviceMemoryBlock* m_Block;
3214  VkDeviceSize m_Offset;
3215  bool m_CanBecomeLost;
3216  };
3217 
3218  // Allocation for an object that has its own private VkDeviceMemory.
3219  struct DedicatedAllocation
3220  {
3221  uint32_t m_MemoryTypeIndex;
3222  VkDeviceMemory m_hMemory;
3223  void* m_pMappedData; // Not null means memory is mapped.
3224  };
3225 
3226  union
3227  {
3228  // Allocation out of VmaDeviceMemoryBlock.
3229  BlockAllocation m_BlockAllocation;
3230  // Allocation for an object that has its own private VkDeviceMemory.
3231  DedicatedAllocation m_DedicatedAllocation;
3232  };
3233 
3234  void FreeUserDataString(VmaAllocator hAllocator);
3235 };
3236 
3237 /*
3238 Represents a region of VmaDeviceMemoryBlock that is either assigned to a
3239 VmaAllocation and returned as allocated memory, or free.
3240 */
3241 struct VmaSuballocation
3242 {
3243  VkDeviceSize offset;
3244  VkDeviceSize size;
3245  VmaAllocation hAllocation;
3246  VmaSuballocationType type;
3247 };
3248 
3249 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3250 
3251 // Cost of making one additional allocation lost, expressed in bytes.
3252 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3253 
3254 /*
3255 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3256 
3257 If canMakeOtherLost was false:
3258 - item points to a FREE suballocation.
3259 - itemsToMakeLostCount is 0.
3260 
3261 If canMakeOtherLost was true:
3262 - item points to the first of a sequence of suballocations, each either FREE
3263  or pointing to a VmaAllocation that can become lost.
3264 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3265  the requested allocation to succeed.
3266 */
3267 struct VmaAllocationRequest
3268 {
3269  VkDeviceSize offset;
3270  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3271  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3272  VmaSuballocationList::iterator item;
3273  size_t itemsToMakeLostCount;
3274 
3275  VkDeviceSize CalcCost() const
3276  {
3277  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3278  }
3279 };
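// Illustrative sketch (hypothetical helper, not part of the library): how the
// cost formula above compares candidates. A request that must make 2
// allocations lost with 3 MiB combined size costs 5 MiB, so any request
// satisfiable purely from free space (cost 0) is preferred.
static inline VkDeviceSize VmaExampleLostCost()
{
    const VkDeviceSize sumItemSize = 3ull * 1024 * 1024; // hypothetical value
    const size_t itemsToMakeLostCount = 2;               // hypothetical value
    // Same formula as VmaAllocationRequest::CalcCost(): 3 MiB + 2 * 1 MiB = 5 MiB.
    return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
}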
3280 
3281 /*
3282 Data structure used for bookkeeping of allocations and unused ranges of memory
3283 in a single VkDeviceMemory block.
3284 */
3285 class VmaBlockMetadata
3286 {
3287 public:
3288  VmaBlockMetadata(VmaAllocator hAllocator);
3289  ~VmaBlockMetadata();
3290  void Init(VkDeviceSize size);
3291 
3292  // Validates all data structures inside this object. If not valid, returns false.
3293  bool Validate() const;
3294  VkDeviceSize GetSize() const { return m_Size; }
3295  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3296  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3297  VkDeviceSize GetUnusedRangeSizeMax() const;
3298  // Returns true if this block is empty - contains only a single free suballocation.
3299  bool IsEmpty() const;
3300 
3301  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3302  void AddPoolStats(VmaPoolStats& inoutStats) const;
3303 
3304 #if VMA_STATS_STRING_ENABLED
3305  void PrintDetailedMap(class VmaJsonWriter& json) const;
3306 #endif
3307 
3308  // Creates a trivial request for the case when the block is empty.
3309  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3310 
3311  // Tries to find a place for suballocation with given parameters inside this block.
3312  // If succeeded, fills pAllocationRequest and returns true.
3313  // If failed, returns false.
3314  bool CreateAllocationRequest(
3315  uint32_t currentFrameIndex,
3316  uint32_t frameInUseCount,
3317  VkDeviceSize bufferImageGranularity,
3318  VkDeviceSize allocSize,
3319  VkDeviceSize allocAlignment,
3320  VmaSuballocationType allocType,
3321  bool canMakeOtherLost,
3322  VmaAllocationRequest* pAllocationRequest);
3323 
3324  bool MakeRequestedAllocationsLost(
3325  uint32_t currentFrameIndex,
3326  uint32_t frameInUseCount,
3327  VmaAllocationRequest* pAllocationRequest);
3328 
3329  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3330 
3331  // Makes actual allocation based on request. Request must already be checked and valid.
3332  void Alloc(
3333  const VmaAllocationRequest& request,
3334  VmaSuballocationType type,
3335  VkDeviceSize allocSize,
3336  VmaAllocation hAllocation);
3337 
3338  // Frees the suballocation assigned to the given allocation.
3339  void Free(const VmaAllocation allocation);
3340 
3341 private:
3342  VkDeviceSize m_Size;
3343  uint32_t m_FreeCount;
3344  VkDeviceSize m_SumFreeSize;
3345  VmaSuballocationList m_Suballocations;
3346  // Suballocations that are free and have size greater than certain threshold.
3347  // Sorted by size, ascending.
3348  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3349 
3350  bool ValidateFreeSuballocationList() const;
3351 
3352  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
3353  // If yes, fills pOffset (and the other output parameters) and returns true. If no, returns false.
3354  bool CheckAllocation(
3355  uint32_t currentFrameIndex,
3356  uint32_t frameInUseCount,
3357  VkDeviceSize bufferImageGranularity,
3358  VkDeviceSize allocSize,
3359  VkDeviceSize allocAlignment,
3360  VmaSuballocationType allocType,
3361  VmaSuballocationList::const_iterator suballocItem,
3362  bool canMakeOtherLost,
3363  VkDeviceSize* pOffset,
3364  size_t* itemsToMakeLostCount,
3365  VkDeviceSize* pSumFreeSize,
3366  VkDeviceSize* pSumItemSize) const;
3367  // Given a free suballocation, merges it with the following one, which must also be free.
3368  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3369  // Releases given suballocation, making it free.
3370  // Merges it with adjacent free suballocations if applicable.
3371  // Returns iterator to new free suballocation at this place.
3372  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3373  // Given a free suballocation, inserts it into the sorted list
3374  // m_FreeSuballocationsBySize if it is large enough to be registered.
3375  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3376  // Given a free suballocation, removes it from the sorted list
3377  // m_FreeSuballocationsBySize if it was registered there.
3378  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3379 };
3380 
3381 // Helper class that represents mapped memory. Synchronized internally.
3382 class VmaDeviceMemoryMapping
3383 {
3384 public:
3385  VmaDeviceMemoryMapping();
3386  ~VmaDeviceMemoryMapping();
3387 
3388  void* GetMappedData() const { return m_pMappedData; }
3389 
3390  // ppData can be null.
3391  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
3392  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
3393 
3394 private:
3395  VMA_MUTEX m_Mutex;
3396  uint32_t m_MapCount;
3397  void* m_pMappedData;
3398 };
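// Sketch of the reference-counting idea behind Map()/Unmap() above (the real
// definitions appear later in this file; this is a simplification):
// vkMapMemory is called only on the 0 -> 1 transition and vkUnmapMemory only
// on 1 -> 0, under m_Mutex, so nested maps of the same memory are cheap.
//
//   Map:   lock(m_Mutex); if(m_MapCount++ == 0) vkMapMemory(..., &m_pMappedData);
//          if(ppData != VMA_NULL) *ppData = m_pMappedData;
//   Unmap: lock(m_Mutex); if(--m_MapCount == 0) { vkUnmapMemory(...); m_pMappedData = VMA_NULL; }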
3399 
3400 /*
3401 Represents a single block of device memory (`VkDeviceMemory`) with all the
3402 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3403 
3404 Thread-safety: This class must be externally synchronized.
3405 */
3406 class VmaDeviceMemoryBlock
3407 {
3408 public:
3409  uint32_t m_MemoryTypeIndex;
3410  VkDeviceMemory m_hMemory;
3411  VmaDeviceMemoryMapping m_Mapping;
3412  VmaBlockMetadata m_Metadata;
3413 
3414  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3415 
3416  ~VmaDeviceMemoryBlock()
3417  {
3418  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3419  }
3420 
3421  // Always call after construction.
3422  void Init(
3423  uint32_t newMemoryTypeIndex,
3424  VkDeviceMemory newMemory,
3425  VkDeviceSize newSize);
3426  // Always call before destruction.
3427  void Destroy(VmaAllocator allocator);
3428 
3429  // Validates all data structures inside this object. If not valid, returns false.
3430  bool Validate() const;
3431 
3432  // ppData can be null.
3433  VkResult Map(VmaAllocator hAllocator, void** ppData);
3434  void Unmap(VmaAllocator hAllocator);
3435 };
3436 
3437 struct VmaPointerLess
3438 {
3439  bool operator()(const void* lhs, const void* rhs) const
3440  {
3441  return lhs < rhs;
3442  }
3443 };
3444 
3445 class VmaDefragmentator;
3446 
3447 /*
3448 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3449 Vulkan memory type.
3450 
3451 Synchronized internally with a mutex.
3452 */
3453 struct VmaBlockVector
3454 {
3455  VmaBlockVector(
3456  VmaAllocator hAllocator,
3457  uint32_t memoryTypeIndex,
3458  VkDeviceSize preferredBlockSize,
3459  size_t minBlockCount,
3460  size_t maxBlockCount,
3461  VkDeviceSize bufferImageGranularity,
3462  uint32_t frameInUseCount,
3463  bool isCustomPool);
3464  ~VmaBlockVector();
3465 
3466  VkResult CreateMinBlocks();
3467 
3468  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3469  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3470  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3471  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3472 
3473  void GetPoolStats(VmaPoolStats* pStats);
3474 
3475  bool IsEmpty() const { return m_Blocks.empty(); }
3476 
3477  VkResult Allocate(
3478  VmaPool hCurrentPool,
3479  uint32_t currentFrameIndex,
3480  const VkMemoryRequirements& vkMemReq,
3481  const VmaAllocationCreateInfo& createInfo,
3482  VmaSuballocationType suballocType,
3483  VmaAllocation* pAllocation);
3484 
3485  void Free(
3486  VmaAllocation hAllocation);
3487 
3488  // Adds statistics of this BlockVector to pStats.
3489  void AddStats(VmaStats* pStats);
3490 
3491 #if VMA_STATS_STRING_ENABLED
3492  void PrintDetailedMap(class VmaJsonWriter& json);
3493 #endif
3494 
3495  void MakePoolAllocationsLost(
3496  uint32_t currentFrameIndex,
3497  size_t* pLostAllocationCount);
3498 
3499  VmaDefragmentator* EnsureDefragmentator(
3500  VmaAllocator hAllocator,
3501  uint32_t currentFrameIndex);
3502 
3503  VkResult Defragment(
3504  VmaDefragmentationStats* pDefragmentationStats,
3505  VkDeviceSize& maxBytesToMove,
3506  uint32_t& maxAllocationsToMove);
3507 
3508  void DestroyDefragmentator();
3509 
3510 private:
3511  friend class VmaDefragmentator;
3512 
3513  const VmaAllocator m_hAllocator;
3514  const uint32_t m_MemoryTypeIndex;
3515  const VkDeviceSize m_PreferredBlockSize;
3516  const size_t m_MinBlockCount;
3517  const size_t m_MaxBlockCount;
3518  const VkDeviceSize m_BufferImageGranularity;
3519  const uint32_t m_FrameInUseCount;
3520  const bool m_IsCustomPool;
3521  VMA_MUTEX m_Mutex;
3522  // Incrementally sorted by sumFreeSize, ascending.
3523  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3524  /* There can be at most one block that is completely empty - a
3525  hysteresis to avoid the pessimistic case of alternating creation and
3526  destruction of a VkDeviceMemory. */
3527  bool m_HasEmptyBlock;
3528  VmaDefragmentator* m_pDefragmentator;
3529 
3530  // Finds and removes given block from vector.
3531  void Remove(VmaDeviceMemoryBlock* pBlock);
3532 
3533  // Performs single step in sorting m_Blocks. They may not be fully sorted
3534  // after this call.
3535  void IncrementallySortBlocks();
3536 
3537  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3538 };
3539 
3540 struct VmaPool_T
3541 {
3542 public:
3543  VmaBlockVector m_BlockVector;
3544 
3545  // Takes ownership.
3546  VmaPool_T(
3547  VmaAllocator hAllocator,
3548  const VmaPoolCreateInfo& createInfo);
3549  ~VmaPool_T();
3550 
3551  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3552 
3553 #if VMA_STATS_STRING_ENABLED
3554  //void PrintDetailedMap(class VmaStringBuilder& sb);
3555 #endif
3556 };
3557 
3558 class VmaDefragmentator
3559 {
3560  const VmaAllocator m_hAllocator;
3561  VmaBlockVector* const m_pBlockVector;
3562  uint32_t m_CurrentFrameIndex;
3563  VkDeviceSize m_BytesMoved;
3564  uint32_t m_AllocationsMoved;
3565 
3566  struct AllocationInfo
3567  {
3568  VmaAllocation m_hAllocation;
3569  VkBool32* m_pChanged;
3570 
3571  AllocationInfo() :
3572  m_hAllocation(VK_NULL_HANDLE),
3573  m_pChanged(VMA_NULL)
3574  {
3575  }
3576  };
3577 
3578  struct AllocationInfoSizeGreater
3579  {
3580  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3581  {
3582  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3583  }
3584  };
3585 
3586  // Used between AddAllocation and Defragment.
3587  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3588 
3589  struct BlockInfo
3590  {
3591  VmaDeviceMemoryBlock* m_pBlock;
3592  bool m_HasNonMovableAllocations;
3593  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3594 
3595  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3596  m_pBlock(VMA_NULL),
3597  m_HasNonMovableAllocations(true),
3598  m_Allocations(pAllocationCallbacks),
3599  m_pMappedDataForDefragmentation(VMA_NULL)
3600  {
3601  }
3602 
3603  void CalcHasNonMovableAllocations()
3604  {
3605  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3606  const size_t defragmentAllocCount = m_Allocations.size();
3607  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3608  }
3609 
3610  void SortAllocationsBySizeDescecnding()
3611  {
3612  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3613  }
3614 
3615  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3616  void Unmap(VmaAllocator hAllocator);
3617 
3618  private:
3619  // Not null if mapped for defragmentation only, not originally mapped.
3620  void* m_pMappedDataForDefragmentation;
3621  };
3622 
3623  struct BlockPointerLess
3624  {
3625  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3626  {
3627  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3628  }
3629  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3630  {
3631  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3632  }
3633  };
3634 
3635  // 1. Blocks with some non-movable allocations go first.
3636  // 2. Blocks with smaller sumFreeSize go first.
3637  struct BlockInfoCompareMoveDestination
3638  {
3639  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3640  {
3641  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3642  {
3643  return true;
3644  }
3645  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3646  {
3647  return false;
3648  }
3649  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3650  {
3651  return true;
3652  }
3653  return false;
3654  }
3655  };
3656 
3657  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3658  BlockInfoVector m_Blocks;
3659 
3660  VkResult DefragmentRound(
3661  VkDeviceSize maxBytesToMove,
3662  uint32_t maxAllocationsToMove);
3663 
3664  static bool MoveMakesSense(
3665  size_t dstBlockIndex, VkDeviceSize dstOffset,
3666  size_t srcBlockIndex, VkDeviceSize srcOffset);
3667 
3668 public:
3669  VmaDefragmentator(
3670  VmaAllocator hAllocator,
3671  VmaBlockVector* pBlockVector,
3672  uint32_t currentFrameIndex);
3673 
3674  ~VmaDefragmentator();
3675 
3676  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3677  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3678 
3679  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3680 
3681  VkResult Defragment(
3682  VkDeviceSize maxBytesToMove,
3683  uint32_t maxAllocationsToMove);
3684 };
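// Usage sketch (simplified; names taken from the declarations above):
//
//   VmaDefragmentator* pDef = blockVector.EnsureDefragmentator(hAllocator, frameIndex);
//   pDef->AddAllocation(hAlloc, &allocChanged); // repeat per candidate allocation
//   pDef->Defragment(maxBytesToMove, maxAllocationsToMove);
//   blockVector.DestroyDefragmentator();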
3685 
3686 // Main allocator object.
3687 struct VmaAllocator_T
3688 {
3689  bool m_UseMutex;
3690  bool m_UseKhrDedicatedAllocation;
3691  VkDevice m_hDevice;
3692  bool m_AllocationCallbacksSpecified;
3693  VkAllocationCallbacks m_AllocationCallbacks;
3694  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3695 
3696  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3697  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3698  VMA_MUTEX m_HeapSizeLimitMutex;
3699 
3700  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3701  VkPhysicalDeviceMemoryProperties m_MemProps;
3702 
3703  // Default pools.
3704  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3705 
3706  // Each vector is sorted by memory (handle value).
3707  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3708  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
3709  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3710 
3711  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3712  ~VmaAllocator_T();
3713 
3714  const VkAllocationCallbacks* GetAllocationCallbacks() const
3715  {
3716  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3717  }
3718  const VmaVulkanFunctions& GetVulkanFunctions() const
3719  {
3720  return m_VulkanFunctions;
3721  }
3722 
3723  VkDeviceSize GetBufferImageGranularity() const
3724  {
3725  return VMA_MAX(
3726  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3727  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3728  }
3729 
3730  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3731  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3732 
3733  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3734  {
3735  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3736  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3737  }
3738 
3739  void GetBufferMemoryRequirements(
3740  VkBuffer hBuffer,
3741  VkMemoryRequirements& memReq,
3742  bool& requiresDedicatedAllocation,
3743  bool& prefersDedicatedAllocation) const;
3744  void GetImageMemoryRequirements(
3745  VkImage hImage,
3746  VkMemoryRequirements& memReq,
3747  bool& requiresDedicatedAllocation,
3748  bool& prefersDedicatedAllocation) const;
3749 
3750  // Main allocation function.
3751  VkResult AllocateMemory(
3752  const VkMemoryRequirements& vkMemReq,
3753  bool requiresDedicatedAllocation,
3754  bool prefersDedicatedAllocation,
3755  VkBuffer dedicatedBuffer,
3756  VkImage dedicatedImage,
3757  const VmaAllocationCreateInfo& createInfo,
3758  VmaSuballocationType suballocType,
3759  VmaAllocation* pAllocation);
3760 
3761  // Main deallocation function.
3762  void FreeMemory(const VmaAllocation allocation);
3763 
3764  void CalculateStats(VmaStats* pStats);
3765 
3766 #if VMA_STATS_STRING_ENABLED
3767  void PrintDetailedMap(class VmaJsonWriter& json);
3768 #endif
3769 
3770  VkResult Defragment(
3771  VmaAllocation* pAllocations,
3772  size_t allocationCount,
3773  VkBool32* pAllocationsChanged,
3774  const VmaDefragmentationInfo* pDefragmentationInfo,
3775  VmaDefragmentationStats* pDefragmentationStats);
3776 
3777  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3778 
3779  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3780  void DestroyPool(VmaPool pool);
3781  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3782 
3783  void SetCurrentFrameIndex(uint32_t frameIndex);
3784 
3785  void MakePoolAllocationsLost(
3786  VmaPool hPool,
3787  size_t* pLostAllocationCount);
3788 
3789  void CreateLostAllocation(VmaAllocation* pAllocation);
3790 
3791  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3792  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3793 
3794  VkResult Map(VmaAllocation hAllocation, void** ppData);
3795  void Unmap(VmaAllocation hAllocation);
3796 
3797 private:
3798  VkDeviceSize m_PreferredLargeHeapBlockSize;
3799  VkDeviceSize m_PreferredSmallHeapBlockSize;
3800 
3801  VkPhysicalDevice m_PhysicalDevice;
3802  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3803 
3804  VMA_MUTEX m_PoolsMutex;
3805  // Protected by m_PoolsMutex. Sorted by pointer value.
3806  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3807 
3808  VmaVulkanFunctions m_VulkanFunctions;
3809 
3810  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3811 
3812  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3813 
3814  VkResult AllocateMemoryOfType(
3815  const VkMemoryRequirements& vkMemReq,
3816  bool dedicatedAllocation,
3817  VkBuffer dedicatedBuffer,
3818  VkImage dedicatedImage,
3819  const VmaAllocationCreateInfo& createInfo,
3820  uint32_t memTypeIndex,
3821  VmaSuballocationType suballocType,
3822  VmaAllocation* pAllocation);
3823 
3824  // Allocates and registers new VkDeviceMemory specifically for single allocation.
3825  VkResult AllocateDedicatedMemory(
3826  VkDeviceSize size,
3827  VmaSuballocationType suballocType,
3828  uint32_t memTypeIndex,
3829  bool map,
3830  bool isUserDataString,
3831  void* pUserData,
3832  VkBuffer dedicatedBuffer,
3833  VkImage dedicatedImage,
3834  VmaAllocation* pAllocation);
3835 
3836  // Frees a dedicated allocation: unregisters it and releases its VkDeviceMemory.
3837  void FreeDedicatedMemory(VmaAllocation allocation);
3838 };
3839 
3840 ////////////////////////////////////////////////////////////////////////////////
3841 // Memory allocation #2 after VmaAllocator_T definition
3842 
3843 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3844 {
3845  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3846 }
3847 
3848 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3849 {
3850  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3851 }
3852 
3853 template<typename T>
3854 static T* VmaAllocate(VmaAllocator hAllocator)
3855 {
3856  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3857 }
3858 
3859 template<typename T>
3860 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3861 {
3862  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3863 }
3864 
3865 template<typename T>
3866 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3867 {
3868  if(ptr != VMA_NULL)
3869  {
3870  ptr->~T();
3871  VmaFree(hAllocator, ptr);
3872  }
3873 }
3874 
3875 template<typename T>
3876 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3877 {
3878  if(ptr != VMA_NULL)
3879  {
3880  for(size_t i = count; i--; )
3881  ptr[i].~T();
3882  VmaFree(hAllocator, ptr);
3883  }
3884 }
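// Illustrative usage (hypothetical helper, not part of the library): the
// helpers above mirror new[]/delete[] on top of VkAllocationCallbacks. Note
// that VmaAllocateArray() returns raw, unconstructed storage, so it suits
// trivially constructible types; vma_delete_array() does run destructors.
static inline void VmaExampleArrayUsage(VmaAllocator hAllocator)
{
    uint32_t* const p = VmaAllocateArray<uint32_t>(hAllocator, 16); // raw storage
    p[0] = 42u;
    vma_delete_array(hAllocator, p, 16); // trivial destructors, then VmaFree
}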
3885 
3886 ////////////////////////////////////////////////////////////////////////////////
3887 // VmaStringBuilder
3888 
3889 #if VMA_STATS_STRING_ENABLED
3890 
3891 class VmaStringBuilder
3892 {
3893 public:
3894  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3895  size_t GetLength() const { return m_Data.size(); }
3896  const char* GetData() const { return m_Data.data(); }
3897 
3898  void Add(char ch) { m_Data.push_back(ch); }
3899  void Add(const char* pStr);
3900  void AddNewLine() { Add('\n'); }
3901  void AddNumber(uint32_t num);
3902  void AddNumber(uint64_t num);
3903  void AddPointer(const void* ptr);
3904 
3905 private:
3906  VmaVector< char, VmaStlAllocator<char> > m_Data;
3907 };
3908 
3909 void VmaStringBuilder::Add(const char* pStr)
3910 {
3911  const size_t strLen = strlen(pStr);
3912  if(strLen > 0)
3913  {
3914  const size_t oldCount = m_Data.size();
3915  m_Data.resize(oldCount + strLen);
3916  memcpy(m_Data.data() + oldCount, pStr, strLen);
3917  }
3918 }
3919 
3920 void VmaStringBuilder::AddNumber(uint32_t num)
3921 {
3922  char buf[11];
3923  VmaUint32ToStr(buf, sizeof(buf), num);
3924  Add(buf);
3925 }
3926 
3927 void VmaStringBuilder::AddNumber(uint64_t num)
3928 {
3929  char buf[21];
3930  VmaUint64ToStr(buf, sizeof(buf), num);
3931  Add(buf);
3932 }
3933 
3934 void VmaStringBuilder::AddPointer(const void* ptr)
3935 {
3936  char buf[21];
3937  VmaPtrToStr(buf, sizeof(buf), ptr);
3938  Add(buf);
3939 }
3940 
3941 #endif // #if VMA_STATS_STRING_ENABLED
3942 
3943 ////////////////////////////////////////////////////////////////////////////////
3944 // VmaJsonWriter
3945 
3946 #if VMA_STATS_STRING_ENABLED
3947 
3948 class VmaJsonWriter
3949 {
3950 public:
3951  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3952  ~VmaJsonWriter();
3953 
3954  void BeginObject(bool singleLine = false);
3955  void EndObject();
3956 
3957  void BeginArray(bool singleLine = false);
3958  void EndArray();
3959 
3960  void WriteString(const char* pStr);
3961  void BeginString(const char* pStr = VMA_NULL);
3962  void ContinueString(const char* pStr);
3963  void ContinueString(uint32_t n);
3964  void ContinueString(uint64_t n);
3965  void ContinueString_Pointer(const void* ptr);
3966  void EndString(const char* pStr = VMA_NULL);
3967 
3968  void WriteNumber(uint32_t n);
3969  void WriteNumber(uint64_t n);
3970  void WriteBool(bool b);
3971  void WriteNull();
3972 
3973 private:
3974  static const char* const INDENT;
3975 
3976  enum COLLECTION_TYPE
3977  {
3978  COLLECTION_TYPE_OBJECT,
3979  COLLECTION_TYPE_ARRAY,
3980  };
3981  struct StackItem
3982  {
3983  COLLECTION_TYPE type;
3984  uint32_t valueCount;
3985  bool singleLineMode;
3986  };
3987 
3988  VmaStringBuilder& m_SB;
3989  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3990  bool m_InsideString;
3991 
3992  void BeginValue(bool isString);
3993  void WriteIndent(bool oneLess = false);
3994 };
3995 
3996 const char* const VmaJsonWriter::INDENT = " ";
3997 
3998 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3999  m_SB(sb),
4000  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4001  m_InsideString(false)
4002 {
4003 }
4004 
4005 VmaJsonWriter::~VmaJsonWriter()
4006 {
4007  VMA_ASSERT(!m_InsideString);
4008  VMA_ASSERT(m_Stack.empty());
4009 }
4010 
4011 void VmaJsonWriter::BeginObject(bool singleLine)
4012 {
4013  VMA_ASSERT(!m_InsideString);
4014 
4015  BeginValue(false);
4016  m_SB.Add('{');
4017 
4018  StackItem item;
4019  item.type = COLLECTION_TYPE_OBJECT;
4020  item.valueCount = 0;
4021  item.singleLineMode = singleLine;
4022  m_Stack.push_back(item);
4023 }
4024 
4025 void VmaJsonWriter::EndObject()
4026 {
4027  VMA_ASSERT(!m_InsideString);
4028 
4029  WriteIndent(true);
4030  m_SB.Add('}');
4031 
4032  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4033  m_Stack.pop_back();
4034 }
4035 
4036 void VmaJsonWriter::BeginArray(bool singleLine)
4037 {
4038  VMA_ASSERT(!m_InsideString);
4039 
4040  BeginValue(false);
4041  m_SB.Add('[');
4042 
4043  StackItem item;
4044  item.type = COLLECTION_TYPE_ARRAY;
4045  item.valueCount = 0;
4046  item.singleLineMode = singleLine;
4047  m_Stack.push_back(item);
4048 }
4049 
4050 void VmaJsonWriter::EndArray()
4051 {
4052  VMA_ASSERT(!m_InsideString);
4053 
4054  WriteIndent(true);
4055  m_SB.Add(']');
4056 
4057  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4058  m_Stack.pop_back();
4059 }
4060 
4061 void VmaJsonWriter::WriteString(const char* pStr)
4062 {
4063  BeginString(pStr);
4064  EndString();
4065 }
4066 
4067 void VmaJsonWriter::BeginString(const char* pStr)
4068 {
4069  VMA_ASSERT(!m_InsideString);
4070 
4071  BeginValue(true);
4072  m_SB.Add('"');
4073  m_InsideString = true;
4074  if(pStr != VMA_NULL && pStr[0] != '\0')
4075  {
4076  ContinueString(pStr);
4077  }
4078 }
4079 
4080 void VmaJsonWriter::ContinueString(const char* pStr)
4081 {
4082  VMA_ASSERT(m_InsideString);
4083 
4084  const size_t strLen = strlen(pStr);
4085  for(size_t i = 0; i < strLen; ++i)
4086  {
4087  char ch = pStr[i];
4088  if(ch == '\\') // Backslash must be escaped in JSON; apostrophe must not.
4089  {
4090  m_SB.Add("\\\\");
4091  }
4092  else if(ch == '"')
4093  {
4094  m_SB.Add("\\\"");
4095  }
4096  else if(ch >= 32)
4097  {
4098  m_SB.Add(ch);
4099  }
4100  else switch(ch)
4101  {
4102  case '\b':
4103  m_SB.Add("\\b");
4104  break;
4105  case '\f':
4106  m_SB.Add("\\f");
4107  break;
4108  case '\n':
4109  m_SB.Add("\\n");
4110  break;
4111  case '\r':
4112  m_SB.Add("\\r");
4113  break;
4114  case '\t':
4115  m_SB.Add("\\t");
4116  break;
4117  default:
4118  VMA_ASSERT(0 && "Character not currently supported.");
4119  break;
4120  }
4121  }
4122 }
4123 
4124 void VmaJsonWriter::ContinueString(uint32_t n)
4125 {
4126  VMA_ASSERT(m_InsideString);
4127  m_SB.AddNumber(n);
4128 }
4129 
4130 void VmaJsonWriter::ContinueString(uint64_t n)
4131 {
4132  VMA_ASSERT(m_InsideString);
4133  m_SB.AddNumber(n);
4134 }
4135 
4136 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4137 {
4138  VMA_ASSERT(m_InsideString);
4139  m_SB.AddPointer(ptr);
4140 }
4141 
4142 void VmaJsonWriter::EndString(const char* pStr)
4143 {
4144  VMA_ASSERT(m_InsideString);
4145  if(pStr != VMA_NULL && pStr[0] != '\0')
4146  {
4147  ContinueString(pStr);
4148  }
4149  m_SB.Add('"');
4150  m_InsideString = false;
4151 }
4152 
4153 void VmaJsonWriter::WriteNumber(uint32_t n)
4154 {
4155  VMA_ASSERT(!m_InsideString);
4156  BeginValue(false);
4157  m_SB.AddNumber(n);
4158 }
4159 
4160 void VmaJsonWriter::WriteNumber(uint64_t n)
4161 {
4162  VMA_ASSERT(!m_InsideString);
4163  BeginValue(false);
4164  m_SB.AddNumber(n);
4165 }
4166 
4167 void VmaJsonWriter::WriteBool(bool b)
4168 {
4169  VMA_ASSERT(!m_InsideString);
4170  BeginValue(false);
4171  m_SB.Add(b ? "true" : "false");
4172 }
4173 
4174 void VmaJsonWriter::WriteNull()
4175 {
4176  VMA_ASSERT(!m_InsideString);
4177  BeginValue(false);
4178  m_SB.Add("null");
4179 }
4180 
4181 void VmaJsonWriter::BeginValue(bool isString)
4182 {
4183  if(!m_Stack.empty())
4184  {
4185  StackItem& currItem = m_Stack.back();
4186  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4187  currItem.valueCount % 2 == 0)
4188  {
4189  VMA_ASSERT(isString);
4190  }
4191 
4192  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4193  currItem.valueCount % 2 != 0)
4194  {
4195  m_SB.Add(": ");
4196  }
4197  else if(currItem.valueCount > 0)
4198  {
4199  m_SB.Add(", ");
4200  WriteIndent();
4201  }
4202  else
4203  {
4204  WriteIndent();
4205  }
4206  ++currItem.valueCount;
4207  }
4208 }
4209 
4210 void VmaJsonWriter::WriteIndent(bool oneLess)
4211 {
4212  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4213  {
4214  m_SB.AddNewLine();
4215 
4216  size_t count = m_Stack.size();
4217  if(count > 0 && oneLess)
4218  {
4219  --count;
4220  }
4221  for(size_t i = 0; i < count; ++i)
4222  {
4223  m_SB.Add(INDENT);
4224  }
4225  }
4226 }
4227 
4228 #endif // #if VMA_STATS_STRING_ENABLED
4229 
4230 ////////////////////////////////////////////////////////////////////////////////
4231 
4232 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4233 {
4234  if(IsUserDataString())
4235  {
4236  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4237 
4238  FreeUserDataString(hAllocator);
4239 
4240  if(pUserData != VMA_NULL)
4241  {
4242  const char* const newStrSrc = (char*)pUserData;
4243  const size_t newStrLen = strlen(newStrSrc);
4244  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4245  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4246  m_pUserData = newStrDst;
4247  }
4248  }
4249  else
4250  {
4251  m_pUserData = pUserData;
4252  }
4253 }
4254 
4255 VkDeviceSize VmaAllocation_T::GetOffset() const
4256 {
4257  switch(m_Type)
4258  {
4259  case ALLOCATION_TYPE_BLOCK:
4260  return m_BlockAllocation.m_Offset;
4261  case ALLOCATION_TYPE_DEDICATED:
4262  return 0;
4263  default:
4264  VMA_ASSERT(0);
4265  return 0;
4266  }
4267 }
4268 
4269 VkDeviceMemory VmaAllocation_T::GetMemory() const
4270 {
4271  switch(m_Type)
4272  {
4273  case ALLOCATION_TYPE_BLOCK:
4274  return m_BlockAllocation.m_Block->m_hMemory;
4275  case ALLOCATION_TYPE_DEDICATED:
4276  return m_DedicatedAllocation.m_hMemory;
4277  default:
4278  VMA_ASSERT(0);
4279  return VK_NULL_HANDLE;
4280  }
4281 }
4282 
4283 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4284 {
4285  switch(m_Type)
4286  {
4287  case ALLOCATION_TYPE_BLOCK:
4288  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4289  case ALLOCATION_TYPE_DEDICATED:
4290  return m_DedicatedAllocation.m_MemoryTypeIndex;
4291  default:
4292  VMA_ASSERT(0);
4293  return UINT32_MAX;
4294  }
4295 }
4296 
4297 void* VmaAllocation_T::GetMappedData() const
4298 {
4299  switch(m_Type)
4300  {
4301  case ALLOCATION_TYPE_BLOCK:
4302  if(m_MapCount != 0)
4303  {
4304  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4305  VMA_ASSERT(pBlockData != VMA_NULL);
4306  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4307  }
4308  else
4309  {
4310  return VMA_NULL;
4311  }
4312  break;
4313  case ALLOCATION_TYPE_DEDICATED:
4314  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4315  return m_DedicatedAllocation.m_pMappedData;
4316  default:
4317  VMA_ASSERT(0);
4318  return VMA_NULL;
4319  }
4320 }
4321 
4322 bool VmaAllocation_T::CanBecomeLost() const
4323 {
4324  switch(m_Type)
4325  {
4326  case ALLOCATION_TYPE_BLOCK:
4327  return m_BlockAllocation.m_CanBecomeLost;
4328  case ALLOCATION_TYPE_DEDICATED:
4329  return false;
4330  default:
4331  VMA_ASSERT(0);
4332  return false;
4333  }
4334 }
4335 
4336 VmaPool VmaAllocation_T::GetPool() const
4337 {
4338  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4339  return m_BlockAllocation.m_hPool;
4340 }
4341 
4342 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4343 {
4344  VMA_ASSERT(CanBecomeLost());
4345 
4346  /*
4347  Warning: This is a carefully designed algorithm.
4348  Do not modify unless you really know what you're doing :)
4349  */
4350  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4351  for(;;)
4352  {
4353  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4354  {
4355  VMA_ASSERT(0);
4356  return false;
4357  }
4358  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4359  {
4360  return false;
4361  }
4362  else // Last use time earlier than current time.
4363  {
4364  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4365  {
4366  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4367  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4368  return true;
4369  }
4370  }
4371  }
4372 }
4373 
4374 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4375 {
4376  VMA_ASSERT(IsUserDataString());
4377  if(m_pUserData != VMA_NULL)
4378  {
4379  char* const oldStr = (char*)m_pUserData;
4380  const size_t oldStrLen = strlen(oldStr);
4381  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4382  m_pUserData = VMA_NULL;
4383  }
4384 }
4385 
4386 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4387 {
4388  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4389 
4390  if(m_MapCount != 0)
4391  {
4392  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4393  {
4394  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4395  *ppData = m_DedicatedAllocation.m_pMappedData;
4396  ++m_MapCount;
4397  return VK_SUCCESS;
4398  }
4399  else
4400  {
4401  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4402  return VK_ERROR_MEMORY_MAP_FAILED;
4403  }
4404  }
4405  else
4406  {
4407  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4408  hAllocator->m_hDevice,
4409  m_DedicatedAllocation.m_hMemory,
4410  0, // offset
4411  VK_WHOLE_SIZE,
4412  0, // flags
4413  ppData);
4414  if(result == VK_SUCCESS)
4415  {
4416  m_DedicatedAllocation.m_pMappedData = *ppData;
4417  m_MapCount = 1;
4418  }
4419  return result;
4420  }
4421 }
4422 
4423 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4424 {
4425  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4426 
4427  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4428  {
4429  --m_MapCount;
4430  if(m_MapCount == 0)
4431  {
4432  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4433  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4434  hAllocator->m_hDevice,
4435  m_DedicatedAllocation.m_hMemory);
4436  }
4437  }
4438  else
4439  {
4440  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4441  }
4442 }
4443 
4444 #if VMA_STATS_STRING_ENABLED
4445 
4446 // Correspond to values of enum VmaSuballocationType.
4447 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4448  "FREE",
4449  "UNKNOWN",
4450  "BUFFER",
4451  "IMAGE_UNKNOWN",
4452  "IMAGE_LINEAR",
4453  "IMAGE_OPTIMAL",
4454 };
4455 
4456 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4457 {
4458  json.BeginObject();
4459 
4460  json.WriteString("Blocks");
4461  json.WriteNumber(stat.blockCount);
4462 
4463  json.WriteString("Allocations");
4464  json.WriteNumber(stat.allocationCount);
4465 
4466  json.WriteString("UnusedRanges");
4467  json.WriteNumber(stat.unusedRangeCount);
4468 
4469  json.WriteString("UsedBytes");
4470  json.WriteNumber(stat.usedBytes);
4471 
4472  json.WriteString("UnusedBytes");
4473  json.WriteNumber(stat.unusedBytes);
4474 
4475  if(stat.allocationCount > 1)
4476  {
4477  json.WriteString("AllocationSize");
4478  json.BeginObject(true);
4479  json.WriteString("Min");
4480  json.WriteNumber(stat.allocationSizeMin);
4481  json.WriteString("Avg");
4482  json.WriteNumber(stat.allocationSizeAvg);
4483  json.WriteString("Max");
4484  json.WriteNumber(stat.allocationSizeMax);
4485  json.EndObject();
4486  }
4487 
4488  if(stat.unusedRangeCount > 1)
4489  {
4490  json.WriteString("UnusedRangeSize");
4491  json.BeginObject(true);
4492  json.WriteString("Min");
4493  json.WriteNumber(stat.unusedRangeSizeMin);
4494  json.WriteString("Avg");
4495  json.WriteNumber(stat.unusedRangeSizeAvg);
4496  json.WriteString("Max");
4497  json.WriteNumber(stat.unusedRangeSizeMax);
4498  json.EndObject();
4499  }
4500 
4501  json.EndObject();
4502 }
4503 
4504 #endif // #if VMA_STATS_STRING_ENABLED
4505 
4506 struct VmaSuballocationItemSizeLess
4507 {
4508  bool operator()(
4509  const VmaSuballocationList::iterator lhs,
4510  const VmaSuballocationList::iterator rhs) const
4511  {
4512  return lhs->size < rhs->size;
4513  }
4514  bool operator()(
4515  const VmaSuballocationList::iterator lhs,
4516  VkDeviceSize rhsSize) const
4517  {
4518  return lhs->size < rhsSize;
4519  }
4520 };
4521 
4522 ////////////////////////////////////////////////////////////////////////////////
4523 // class VmaBlockMetadata
4524 
4525 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4526  m_Size(0),
4527  m_FreeCount(0),
4528  m_SumFreeSize(0),
4529  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4530  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4531 {
4532 }
4533 
4534 VmaBlockMetadata::~VmaBlockMetadata()
4535 {
4536 }
4537 
4538 void VmaBlockMetadata::Init(VkDeviceSize size)
4539 {
4540  m_Size = size;
4541  m_FreeCount = 1;
4542  m_SumFreeSize = size;
4543 
4544  VmaSuballocation suballoc = {};
4545  suballoc.offset = 0;
4546  suballoc.size = size;
4547  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4548  suballoc.hAllocation = VK_NULL_HANDLE;
4549 
4550  m_Suballocations.push_back(suballoc);
4551  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4552  --suballocItem;
4553  m_FreeSuballocationsBySize.push_back(suballocItem);
4554 }
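// After Init(size), the metadata describes exactly one free range covering the
// whole block: m_Suballocations holds a single { offset = 0, size, FREE }
// entry, m_FreeCount == 1, m_SumFreeSize == size, and that entry is registered
// in m_FreeSuballocationsBySize.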
4555 
4556 bool VmaBlockMetadata::Validate() const
4557 {
4558  if(m_Suballocations.empty())
4559  {
4560  return false;
4561  }
4562 
4563  // Expected offset of new suballocation as calculated from previous ones.
4564  VkDeviceSize calculatedOffset = 0;
4565  // Expected number of free suballocations as calculated from traversing their list.
4566  uint32_t calculatedFreeCount = 0;
4567  // Expected sum size of free suballocations as calculated from traversing their list.
4568  VkDeviceSize calculatedSumFreeSize = 0;
4569  // Expected number of free suballocations that should be registered in
4570  // m_FreeSuballocationsBySize calculated from traversing their list.
4571  size_t freeSuballocationsToRegister = 0;
4572  // True if the previously visited suballocation was free.
4573  bool prevFree = false;
4574 
4575  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4576  suballocItem != m_Suballocations.cend();
4577  ++suballocItem)
4578  {
4579  const VmaSuballocation& subAlloc = *suballocItem;
4580 
4581  // Actual offset of this suballocation doesn't match expected one.
4582  if(subAlloc.offset != calculatedOffset)
4583  {
4584  return false;
4585  }
4586 
4587  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4588  // Two adjacent free suballocations are invalid. They should be merged.
4589  if(prevFree && currFree)
4590  {
4591  return false;
4592  }
4593  prevFree = currFree;
4594 
4595  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4596  {
4597  return false;
4598  }
4599 
4600  if(currFree)
4601  {
4602  calculatedSumFreeSize += subAlloc.size;
4603  ++calculatedFreeCount;
4604  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4605  {
4606  ++freeSuballocationsToRegister;
4607  }
4608  }
4609 
4610  calculatedOffset += subAlloc.size;
4611  }
4612 
4613  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4614  // match expected one.
4615  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4616  {
4617  return false;
4618  }
4619 
4620  VkDeviceSize lastSize = 0;
4621  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4622  {
4623  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4624 
4625  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4626  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4627  {
4628  return false;
4629  }
4630  // They must be sorted by size ascending.
4631  if(suballocItem->size < lastSize)
4632  {
4633  return false;
4634  }
4635 
4636  lastSize = suballocItem->size;
4637  }
4638 
4639  // Check if totals match calculated values.
4640  return
4641  ValidateFreeSuballocationList() &&
4642  (calculatedOffset == m_Size) &&
4643  (calculatedSumFreeSize == m_SumFreeSize) &&
4644  (calculatedFreeCount == m_FreeCount);
4645 }
4646 
4647 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4648 {
4649  if(!m_FreeSuballocationsBySize.empty())
4650  {
4651  return m_FreeSuballocationsBySize.back()->size;
4652  }
4653  else
4654  {
4655  return 0;
4656  }
4657 }
4658 
4659 bool VmaBlockMetadata::IsEmpty() const
4660 {
4661  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4662 }
4663 
4664 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4665 {
4666  outInfo.blockCount = 1;
4667 
4668  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4669  outInfo.allocationCount = rangeCount - m_FreeCount;
4670  outInfo.unusedRangeCount = m_FreeCount;
4671 
4672  outInfo.unusedBytes = m_SumFreeSize;
4673  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4674 
4675  outInfo.allocationSizeMin = UINT64_MAX;
4676  outInfo.allocationSizeMax = 0;
4677  outInfo.unusedRangeSizeMin = UINT64_MAX;
4678  outInfo.unusedRangeSizeMax = 0;
4679 
4680  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4681  suballocItem != m_Suballocations.cend();
4682  ++suballocItem)
4683  {
4684  const VmaSuballocation& suballoc = *suballocItem;
4685  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4686  {
4687  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4688  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4689  }
4690  else
4691  {
4692  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4693  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4694  }
4695  }
4696 }
4697 
4698 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4699 {
4700  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4701 
4702  inoutStats.size += m_Size;
4703  inoutStats.unusedSize += m_SumFreeSize;
4704  inoutStats.allocationCount += rangeCount - m_FreeCount;
4705  inoutStats.unusedRangeCount += m_FreeCount;
4706  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4707 }
4708 
4709 #if VMA_STATS_STRING_ENABLED
4710 
4711 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4712 {
4713  json.BeginObject();
4714 
4715  json.WriteString("TotalBytes");
4716  json.WriteNumber(m_Size);
4717 
4718  json.WriteString("UnusedBytes");
4719  json.WriteNumber(m_SumFreeSize);
4720 
4721  json.WriteString("Allocations");
4722  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4723 
4724  json.WriteString("UnusedRanges");
4725  json.WriteNumber(m_FreeCount);
4726 
4727  json.WriteString("Suballocations");
4728  json.BeginArray();
4729  size_t i = 0;
4730  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4731  suballocItem != m_Suballocations.cend();
4732  ++suballocItem, ++i)
4733  {
4734  json.BeginObject(true);
4735 
4736  json.WriteString("Type");
4737  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4738 
4739  json.WriteString("Size");
4740  json.WriteNumber(suballocItem->size);
4741 
4742  json.WriteString("Offset");
4743  json.WriteNumber(suballocItem->offset);
4744 
4745  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4746  {
4747  const void* pUserData = suballocItem->hAllocation->GetUserData();
4748  if(pUserData != VMA_NULL)
4749  {
4750  json.WriteString("UserData");
4751  if(suballocItem->hAllocation->IsUserDataString())
4752  {
4753  json.WriteString((const char*)pUserData);
4754  }
4755  else
4756  {
4757  json.BeginString();
4758  json.ContinueString_Pointer(pUserData);
4759  json.EndString();
4760  }
4761  }
4762  }
4763 
4764  json.EndObject();
4765  }
4766  json.EndArray();
4767 
4768  json.EndObject();
4769 }
4770 
4771 #endif // #if VMA_STATS_STRING_ENABLED
4772 
4773 /*
4774 How many suitable free suballocations to analyze before choosing the best one.
4775 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
4776  will be chosen.
4777 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4778  suballocations will be analyzed and the best one will be chosen.
4779 - Any other value is also acceptable.
4780 */
4781 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
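// Sketch of VmaBinaryFindFirstNotLess, used below (defined earlier in this
// file; shown simplified): a std::lower_bound equivalent that returns the
// first element of a sorted range not less than key, or end if none exists.
//
// template<typename IterT, typename KeyT, typename CmpT>
// IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, CmpT cmp)
// {
//     size_t down = 0, up = (end - beg);
//     while(down < up)
//     {
//         const size_t mid = (down + up) / 2;
//         if(cmp(*(beg + mid), key)) { down = mid + 1; } else { up = mid; }
//     }
//     return beg + down;
// }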
4782 
4783 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4784 {
4785  VMA_ASSERT(IsEmpty());
4786  pAllocationRequest->offset = 0;
4787  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4788  pAllocationRequest->sumItemSize = 0;
4789  pAllocationRequest->item = m_Suballocations.begin();
4790  pAllocationRequest->itemsToMakeLostCount = 0;
4791 }
4792 
4793 bool VmaBlockMetadata::CreateAllocationRequest(
4794  uint32_t currentFrameIndex,
4795  uint32_t frameInUseCount,
4796  VkDeviceSize bufferImageGranularity,
4797  VkDeviceSize allocSize,
4798  VkDeviceSize allocAlignment,
4799  VmaSuballocationType allocType,
4800  bool canMakeOtherLost,
4801  VmaAllocationRequest* pAllocationRequest)
4802 {
4803  VMA_ASSERT(allocSize > 0);
4804  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4805  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4806  VMA_HEAVY_ASSERT(Validate());
4807 
4808  // There is not enough total free space in this block to fulfill the request: Early return.
4809  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4810  {
4811  return false;
4812  }
4813 
4814  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4815  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4816  if(freeSuballocCount > 0)
4817  {
4818  if(VMA_BEST_FIT)
4819  {
4820  // Find first free suballocation with size not less than allocSize.
4821  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4822  m_FreeSuballocationsBySize.data(),
4823  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4824  allocSize,
4825  VmaSuballocationItemSizeLess());
4826  size_t index = it - m_FreeSuballocationsBySize.data();
4827  for(; index < freeSuballocCount; ++index)
4828  {
4829  if(CheckAllocation(
4830  currentFrameIndex,
4831  frameInUseCount,
4832  bufferImageGranularity,
4833  allocSize,
4834  allocAlignment,
4835  allocType,
4836  m_FreeSuballocationsBySize[index],
4837  false, // canMakeOtherLost
4838  &pAllocationRequest->offset,
4839  &pAllocationRequest->itemsToMakeLostCount,
4840  &pAllocationRequest->sumFreeSize,
4841  &pAllocationRequest->sumItemSize))
4842  {
4843  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4844  return true;
4845  }
4846  }
4847  }
4848  else
4849  {
4850  // Search starting from biggest suballocations.
4851  for(size_t index = freeSuballocCount; index--; )
4852  {
4853  if(CheckAllocation(
4854  currentFrameIndex,
4855  frameInUseCount,
4856  bufferImageGranularity,
4857  allocSize,
4858  allocAlignment,
4859  allocType,
4860  m_FreeSuballocationsBySize[index],
4861  false, // canMakeOtherLost
4862  &pAllocationRequest->offset,
4863  &pAllocationRequest->itemsToMakeLostCount,
4864  &pAllocationRequest->sumFreeSize,
4865  &pAllocationRequest->sumItemSize))
4866  {
4867  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4868  return true;
4869  }
4870  }
4871  }
4872  }
4873 
4874  if(canMakeOtherLost)
4875  {
4876  // Brute-force algorithm. TODO: Come up with something better.
4877 
4878  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4879  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4880 
4881  VmaAllocationRequest tmpAllocRequest = {};
4882  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4883  suballocIt != m_Suballocations.end();
4884  ++suballocIt)
4885  {
4886  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4887  suballocIt->hAllocation->CanBecomeLost())
4888  {
4889  if(CheckAllocation(
4890  currentFrameIndex,
4891  frameInUseCount,
4892  bufferImageGranularity,
4893  allocSize,
4894  allocAlignment,
4895  allocType,
4896  suballocIt,
4897  canMakeOtherLost,
4898  &tmpAllocRequest.offset,
4899  &tmpAllocRequest.itemsToMakeLostCount,
4900  &tmpAllocRequest.sumFreeSize,
4901  &tmpAllocRequest.sumItemSize))
4902  {
4903  tmpAllocRequest.item = suballocIt;
4904 
4905  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4906  {
4907  *pAllocationRequest = tmpAllocRequest;
4908  }
4909  }
4910  }
4911  }
4912 
4913  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4914  {
4915  return true;
4916  }
4917  }
4918 
4919  return false;
4920 }
4921 
4922 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4923  uint32_t currentFrameIndex,
4924  uint32_t frameInUseCount,
4925  VmaAllocationRequest* pAllocationRequest)
4926 {
4927  while(pAllocationRequest->itemsToMakeLostCount > 0)
4928  {
4929  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4930  {
4931  ++pAllocationRequest->item;
4932  }
4933  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4934  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4935  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4936  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4937  {
4938  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4939  --pAllocationRequest->itemsToMakeLostCount;
4940  }
4941  else
4942  {
4943  return false;
4944  }
4945  }
4946 
4947  VMA_HEAVY_ASSERT(Validate());
4948  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4949  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4950 
4951  return true;
4952 }
4953 
4954 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4955 {
4956  uint32_t lostAllocationCount = 0;
4957  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4958  it != m_Suballocations.end();
4959  ++it)
4960  {
4961  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4962  it->hAllocation->CanBecomeLost() &&
4963  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4964  {
4965  it = FreeSuballocation(it);
4966  ++lostAllocationCount;
4967  }
4968  }
4969  return lostAllocationCount;
4970 }
4971 
4972 void VmaBlockMetadata::Alloc(
4973  const VmaAllocationRequest& request,
4974  VmaSuballocationType type,
4975  VkDeviceSize allocSize,
4976  VmaAllocation hAllocation)
4977 {
4978  VMA_ASSERT(request.item != m_Suballocations.end());
4979  VmaSuballocation& suballoc = *request.item;
4980  // Given suballocation is a free block.
4981  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4982  // Given offset is inside this suballocation.
4983  VMA_ASSERT(request.offset >= suballoc.offset);
4984  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4985  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4986  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4987 
4988  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4989  // it to become used.
4990  UnregisterFreeSuballocation(request.item);
4991 
4992  suballoc.offset = request.offset;
4993  suballoc.size = allocSize;
4994  suballoc.type = type;
4995  suballoc.hAllocation = hAllocation;
4996 
4997  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4998  if(paddingEnd)
4999  {
5000  VmaSuballocation paddingSuballoc = {};
5001  paddingSuballoc.offset = request.offset + allocSize;
5002  paddingSuballoc.size = paddingEnd;
5003  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5004  VmaSuballocationList::iterator next = request.item;
5005  ++next;
5006  const VmaSuballocationList::iterator paddingEndItem =
5007  m_Suballocations.insert(next, paddingSuballoc);
5008  RegisterFreeSuballocation(paddingEndItem);
5009  }
5010 
5011  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5012  if(paddingBegin)
5013  {
5014  VmaSuballocation paddingSuballoc = {};
5015  paddingSuballoc.offset = request.offset - paddingBegin;
5016  paddingSuballoc.size = paddingBegin;
5017  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5018  const VmaSuballocationList::iterator paddingBeginItem =
5019  m_Suballocations.insert(request.item, paddingSuballoc);
5020  RegisterFreeSuballocation(paddingBeginItem);
5021  }
5022 
5023  // Update totals.
5024  m_FreeCount = m_FreeCount - 1;
5025  if(paddingBegin > 0)
5026  {
5027  ++m_FreeCount;
5028  }
5029  if(paddingEnd > 0)
5030  {
5031  ++m_FreeCount;
5032  }
5033  m_SumFreeSize -= allocSize;
5034 }
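// Worked example (hypothetical numbers): a 1024-byte FREE suballocation at
// offset 0, request.offset == 256 (after alignment), allocSize == 512:
//   paddingBegin = 256 - 0          = 256 -> new free suballocation [0, 256)
//   paddingEnd   = 1024 - 256 - 512 = 256 -> new free suballocation [768, 1024)
// m_FreeCount changes by -1 (consumed) +1 (begin) +1 (end) = net +1, and
// m_SumFreeSize decreases by exactly allocSize (512).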
5035 
5036 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5037 {
5038  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5039  suballocItem != m_Suballocations.end();
5040  ++suballocItem)
5041  {
5042  VmaSuballocation& suballoc = *suballocItem;
5043  if(suballoc.hAllocation == allocation)
5044  {
5045  FreeSuballocation(suballocItem);
5046  VMA_HEAVY_ASSERT(Validate());
5047  return;
5048  }
5049  }
5050  VMA_ASSERT(0 && "Not found!");
5051 }
5052 
5053 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5054 {
5055  VkDeviceSize lastSize = 0;
5056  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5057  {
5058  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5059 
5060  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5061  {
5062  VMA_ASSERT(0);
5063  return false;
5064  }
5065  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5066  {
5067  VMA_ASSERT(0);
5068  return false;
5069  }
5070  if(it->size < lastSize)
5071  {
5072  VMA_ASSERT(0);
5073  return false;
5074  }
5075 
5076  lastSize = it->size;
5077  }
5078  return true;
5079 }
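// The three checks above encode the invariants of m_FreeSuballocationsBySize:
// every entry refers to a FREE suballocation, every entry is at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes (smaller free ranges are
// deliberately left unindexed), and entries are ordered by non-decreasing size
// so that best-fit lookups can use binary search.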
5080 
5081 bool VmaBlockMetadata::CheckAllocation(
5082  uint32_t currentFrameIndex,
5083  uint32_t frameInUseCount,
5084  VkDeviceSize bufferImageGranularity,
5085  VkDeviceSize allocSize,
5086  VkDeviceSize allocAlignment,
5087  VmaSuballocationType allocType,
5088  VmaSuballocationList::const_iterator suballocItem,
5089  bool canMakeOtherLost,
5090  VkDeviceSize* pOffset,
5091  size_t* itemsToMakeLostCount,
5092  VkDeviceSize* pSumFreeSize,
5093  VkDeviceSize* pSumItemSize) const
5094 {
5095  VMA_ASSERT(allocSize > 0);
5096  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5097  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5098  VMA_ASSERT(pOffset != VMA_NULL);
5099 
5100  *itemsToMakeLostCount = 0;
5101  *pSumFreeSize = 0;
5102  *pSumItemSize = 0;
5103 
5104  if(canMakeOtherLost)
5105  {
5106  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5107  {
5108  *pSumFreeSize = suballocItem->size;
5109  }
5110  else
5111  {
5112  if(suballocItem->hAllocation->CanBecomeLost() &&
5113  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5114  {
5115  ++*itemsToMakeLostCount;
5116  *pSumItemSize = suballocItem->size;
5117  }
5118  else
5119  {
5120  return false;
5121  }
5122  }
5123 
5124  // Remaining size is too small for this request: Early return.
5125  if(m_Size - suballocItem->offset < allocSize)
5126  {
5127  return false;
5128  }
5129 
5130  // Start from offset equal to beginning of this suballocation.
5131  *pOffset = suballocItem->offset;
5132 
5133  // Apply VMA_DEBUG_MARGIN at the beginning.
5134  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5135  {
5136  *pOffset += VMA_DEBUG_MARGIN;
5137  }
5138 
5139  // Apply alignment.
5140  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5141  *pOffset = VmaAlignUp(*pOffset, alignment);
5142 
5143  // Check previous suballocations for BufferImageGranularity conflicts.
5144  // Make bigger alignment if necessary.
5145  if(bufferImageGranularity > 1)
5146  {
5147  bool bufferImageGranularityConflict = false;
5148  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5149  while(prevSuballocItem != m_Suballocations.cbegin())
5150  {
5151  --prevSuballocItem;
5152  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5153  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5154  {
5155  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5156  {
5157  bufferImageGranularityConflict = true;
5158  break;
5159  }
5160  }
5161  else
5162  // Already on previous page.
5163  break;
5164  }
5165  if(bufferImageGranularityConflict)
5166  {
5167  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5168  }
5169  }
5170 
5171  // Now that we have final *pOffset, check if we are past suballocItem.
5172  // If yes, return false - this function should be called for another suballocItem as starting point.
5173  if(*pOffset >= suballocItem->offset + suballocItem->size)
5174  {
5175  return false;
5176  }
5177 
5178  // Calculate padding at the beginning based on current offset.
5179  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5180 
5181  // Calculate required margin at the end if this is not last suballocation.
5182  VmaSuballocationList::const_iterator next = suballocItem;
5183  ++next;
5184  const VkDeviceSize requiredEndMargin =
5185  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5186 
5187  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5188  // Another early return check.
5189  if(suballocItem->offset + totalSize > m_Size)
5190  {
5191  return false;
5192  }
5193 
5194  // Advance lastSuballocItem until desired size is reached.
5195  // Update itemsToMakeLostCount.
5196  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5197  if(totalSize > suballocItem->size)
5198  {
5199  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5200  while(remainingSize > 0)
5201  {
5202  ++lastSuballocItem;
5203  if(lastSuballocItem == m_Suballocations.cend())
5204  {
5205  return false;
5206  }
5207  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5208  {
5209  *pSumFreeSize += lastSuballocItem->size;
5210  }
5211  else
5212  {
5213  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5214  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5215  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5216  {
5217  ++*itemsToMakeLostCount;
5218  *pSumItemSize += lastSuballocItem->size;
5219  }
5220  else
5221  {
5222  return false;
5223  }
5224  }
5225  remainingSize = (lastSuballocItem->size < remainingSize) ?
5226  remainingSize - lastSuballocItem->size : 0;
5227  }
5228  }
5229 
5230  // Check next suballocations for BufferImageGranularity conflicts.
5231  // If conflict exists, we must mark more allocations lost or fail.
5232  if(bufferImageGranularity > 1)
5233  {
5234  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5235  ++nextSuballocItem;
5236  while(nextSuballocItem != m_Suballocations.cend())
5237  {
5238  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5239  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5240  {
5241  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5242  {
5243  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5244  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5245  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5246  {
5247  ++*itemsToMakeLostCount;
5248  }
5249  else
5250  {
5251  return false;
5252  }
5253  }
5254  }
5255  else
5256  {
5257  // Already on next page.
5258  break;
5259  }
5260  ++nextSuballocItem;
5261  }
5262  }
5263  }
5264  else
5265  {
5266  const VmaSuballocation& suballoc = *suballocItem;
5267  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5268 
5269  *pSumFreeSize = suballoc.size;
5270 
5271  // Size of this suballocation is too small for this request: Early return.
5272  if(suballoc.size < allocSize)
5273  {
5274  return false;
5275  }
5276 
5277  // Start from offset equal to beginning of this suballocation.
5278  *pOffset = suballoc.offset;
5279 
5280  // Apply VMA_DEBUG_MARGIN at the beginning.
5281  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5282  {
5283  *pOffset += VMA_DEBUG_MARGIN;
5284  }
5285 
5286  // Apply alignment.
5287  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5288  *pOffset = VmaAlignUp(*pOffset, alignment);
5289 
5290  // Check previous suballocations for BufferImageGranularity conflicts.
5291  // Make bigger alignment if necessary.
5292  if(bufferImageGranularity > 1)
5293  {
5294  bool bufferImageGranularityConflict = false;
5295  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5296  while(prevSuballocItem != m_Suballocations.cbegin())
5297  {
5298  --prevSuballocItem;
5299  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5300  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5301  {
5302  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5303  {
5304  bufferImageGranularityConflict = true;
5305  break;
5306  }
5307  }
5308  else
5309  // Already on previous page.
5310  break;
5311  }
5312  if(bufferImageGranularityConflict)
5313  {
5314  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5315  }
5316  }
5317 
5318  // Calculate padding at the beginning based on current offset.
5319  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5320 
5321  // Calculate required margin at the end if this is not last suballocation.
5322  VmaSuballocationList::const_iterator next = suballocItem;
5323  ++next;
5324  const VkDeviceSize requiredEndMargin =
5325  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5326 
5327  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5328  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5329  {
5330  return false;
5331  }
5332 
5333  // Check next suballocations for BufferImageGranularity conflicts.
5334  // If conflict exists, allocation cannot be made here.
5335  if(bufferImageGranularity > 1)
5336  {
5337  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5338  ++nextSuballocItem;
5339  while(nextSuballocItem != m_Suballocations.cend())
5340  {
5341  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5342  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5343  {
5344  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5345  {
5346  return false;
5347  }
5348  }
5349  else
5350  {
5351  // Already on next page.
5352  break;
5353  }
5354  ++nextSuballocItem;
5355  }
5356  }
5357  }
5358 
5359  // All tests passed: Success. pOffset is already filled.
5360  return true;
5361 }
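// A minimal sketch of the offset math used above (ExampleAlignUp is a
// hypothetical helper shown only for illustration; the library's VmaAlignUp
// behaves the same way):
//
//     VkDeviceSize ExampleAlignUp(VkDeviceSize offset, VkDeviceSize alignment)
//     {
//         // Round up to the next multiple of alignment, e.g. (70, 64) -> 128.
//         return (offset + alignment - 1) / alignment * alignment;
//     }
//
// bufferImageGranularity then acts like a page size: when a buffer and an
// image would otherwise share one such "page", *pOffset is additionally
// aligned up to the granularity so the two resource kinds never share a page.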
5362 
5363 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5364 {
5365  VMA_ASSERT(item != m_Suballocations.end());
5366  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5367 
5368  VmaSuballocationList::iterator nextItem = item;
5369  ++nextItem;
5370  VMA_ASSERT(nextItem != m_Suballocations.end());
5371  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5372 
5373  item->size += nextItem->size;
5374  --m_FreeCount;
5375  m_Suballocations.erase(nextItem);
5376 }
5377 
5378 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5379 {
5380  // Change this suballocation to be marked as free.
5381  VmaSuballocation& suballoc = *suballocItem;
5382  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5383  suballoc.hAllocation = VK_NULL_HANDLE;
5384 
5385  // Update totals.
5386  ++m_FreeCount;
5387  m_SumFreeSize += suballoc.size;
5388 
5389  // Merge with previous and/or next suballocation if it's also free.
5390  bool mergeWithNext = false;
5391  bool mergeWithPrev = false;
5392 
5393  VmaSuballocationList::iterator nextItem = suballocItem;
5394  ++nextItem;
5395  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5396  {
5397  mergeWithNext = true;
5398  }
5399 
5400  VmaSuballocationList::iterator prevItem = suballocItem;
5401  if(suballocItem != m_Suballocations.begin())
5402  {
5403  --prevItem;
5404  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5405  {
5406  mergeWithPrev = true;
5407  }
5408  }
5409 
5410  if(mergeWithNext)
5411  {
5412  UnregisterFreeSuballocation(nextItem);
5413  MergeFreeWithNext(suballocItem);
5414  }
5415 
5416  if(mergeWithPrev)
5417  {
5418  UnregisterFreeSuballocation(prevItem);
5419  MergeFreeWithNext(prevItem);
5420  RegisterFreeSuballocation(prevItem);
5421  return prevItem;
5422  }
5423  else
5424  {
5425  RegisterFreeSuballocation(suballocItem);
5426  return suballocItem;
5427  }
5428 }
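// Merge example (illustrative): freeing the middle item of
//     [0, 256) FREE | [256, 768) USED | [768, 1024) FREE
// first marks [256, 768) as free, then merges the next item into it, giving
// [256, 1024), then merges that into the previous one, yielding a single
// [0, 1024) FREE suballocation that is re-registered under its new size.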
5429 
5430 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5431 {
5432  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5433  VMA_ASSERT(item->size > 0);
5434 
5435  // You may want to enable this validation at the beginning or at the end of
5436  // this function, depending on what you want to check.
5437  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5438 
5439  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5440  {
5441  if(m_FreeSuballocationsBySize.empty())
5442  {
5443  m_FreeSuballocationsBySize.push_back(item);
5444  }
5445  else
5446  {
5447  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5448  }
5449  }
5450 
5451  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5452 }
5453 
5454 
5455 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5456 {
5457  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5458  VMA_ASSERT(item->size > 0);
5459 
5460  // You may want to enable this validation at the beginning or at the end of
5461  // this function, depending on what you want to check.
5462  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5463 
5464  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5465  {
5466  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5467  m_FreeSuballocationsBySize.data(),
5468  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5469  item,
5470  VmaSuballocationItemSizeLess());
5471  for(size_t index = it - m_FreeSuballocationsBySize.data();
5472  index < m_FreeSuballocationsBySize.size();
5473  ++index)
5474  {
5475  if(m_FreeSuballocationsBySize[index] == item)
5476  {
5477  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5478  return;
5479  }
5480  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5481  }
5482  VMA_ASSERT(0 && "Not found.");
5483  }
5484 
5485  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5486 }
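// Note on the lookup above: VmaBinaryFindFirstNotLess lands on the first entry
// whose size is not less than item->size. Because several free suballocations
// may share the same size, a short linear scan over that equal-sized run is
// still needed to find the exact iterator to remove.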
5487 
5488 ////////////////////////////////////////////////////////////////////////////////
5489 // class VmaDeviceMemoryMapping
5490 
5491 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5492  m_MapCount(0),
5493  m_pMappedData(VMA_NULL)
5494 {
5495 }
5496 
5497 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5498 {
5499  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5500 }
5501 
5502 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
5503 {
5504  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5505  if(m_MapCount != 0)
5506  {
5507  ++m_MapCount;
5508  VMA_ASSERT(m_pMappedData != VMA_NULL);
5509  if(ppData != VMA_NULL)
5510  {
5511  *ppData = m_pMappedData;
5512  }
5513  return VK_SUCCESS;
5514  }
5515  else
5516  {
5517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5518  hAllocator->m_hDevice,
5519  hMemory,
5520  0, // offset
5521  VK_WHOLE_SIZE,
5522  0, // flags
5523  &m_pMappedData);
5524  if(result == VK_SUCCESS)
5525  {
5526  if(ppData != VMA_NULL)
5527  {
5528  *ppData = m_pMappedData;
5529  }
5530  m_MapCount = 1;
5531  }
5532  return result;
5533  }
5534 }
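// The mapping is reference-counted, so nested Map/Unmap pairs are legal and
// only the outermost pair actually calls vkMapMemory/vkUnmapMemory. A usage
// sketch (assuming mapping, hAllocator and hMemory are valid):
//
//     void* pData1 = VMA_NULL;
//     void* pData2 = VMA_NULL;
//     mapping.Map(hAllocator, hMemory, &pData1); // vkMapMemory, m_MapCount = 1
//     mapping.Map(hAllocator, hMemory, &pData2); // reuses pointer, m_MapCount = 2
//     // pData1 == pData2 at this point.
//     mapping.Unmap(hAllocator, hMemory);        // m_MapCount = 1
//     mapping.Unmap(hAllocator, hMemory);        // vkUnmapMemory, m_MapCount = 0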
5535 
5536 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
5537 {
5538  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5539  if(m_MapCount != 0)
5540  {
5541  if(--m_MapCount == 0)
5542  {
5543  m_pMappedData = VMA_NULL;
5544  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5545  }
5546  }
5547  else
5548  {
5549  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5550  }
5551 }
5552 
5553 ////////////////////////////////////////////////////////////////////////////////
5554 // class VmaDeviceMemoryBlock
5555 
5556 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5557  m_MemoryTypeIndex(UINT32_MAX),
5558  m_hMemory(VK_NULL_HANDLE),
5559  m_Metadata(hAllocator)
5560 {
5561 }
5562 
5563 void VmaDeviceMemoryBlock::Init(
5564  uint32_t newMemoryTypeIndex,
5565  VkDeviceMemory newMemory,
5566  VkDeviceSize newSize)
5567 {
5568  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5569 
5570  m_MemoryTypeIndex = newMemoryTypeIndex;
5571  m_hMemory = newMemory;
5572 
5573  m_Metadata.Init(newSize);
5574 }
5575 
5576 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5577 {
5578  // This is the most important assert in the entire library.
5579  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5580  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5581 
5582  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5583  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5584  m_hMemory = VK_NULL_HANDLE;
5585 }
5586 
5587 bool VmaDeviceMemoryBlock::Validate() const
5588 {
5589  if((m_hMemory == VK_NULL_HANDLE) ||
5590  (m_Metadata.GetSize() == 0))
5591  {
5592  return false;
5593  }
5594 
5595  return m_Metadata.Validate();
5596 }
5597 
5598 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
5599 {
5600  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
5601 }
5602 
5603 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
5604 {
5605  m_Mapping.Unmap(hAllocator, m_hMemory);
5606 }
5607 
5608 static void InitStatInfo(VmaStatInfo& outInfo)
5609 {
5610  memset(&outInfo, 0, sizeof(outInfo));
5611  outInfo.allocationSizeMin = UINT64_MAX;
5612  outInfo.unusedRangeSizeMin = UINT64_MAX;
5613 }
5614 
5615 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5616 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5617 {
5618  inoutInfo.blockCount += srcInfo.blockCount;
5619  inoutInfo.allocationCount += srcInfo.allocationCount;
5620  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5621  inoutInfo.usedBytes += srcInfo.usedBytes;
5622  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5623  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5624  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5625  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5626  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5627 }
5628 
5629 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5630 {
5631  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5632  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5633  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5634  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5635 }
5636 
5637 VmaPool_T::VmaPool_T(
5638  VmaAllocator hAllocator,
5639  const VmaPoolCreateInfo& createInfo) :
5640  m_BlockVector(
5641  hAllocator,
5642  createInfo.memoryTypeIndex,
5643  createInfo.blockSize,
5644  createInfo.minBlockCount,
5645  createInfo.maxBlockCount,
5646  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5647  createInfo.frameInUseCount,
5648  true) // isCustomPool
5649 {
5650 }
5651 
5652 VmaPool_T::~VmaPool_T()
5653 {
5654 }
5655 
5656 #if VMA_STATS_STRING_ENABLED
5657 
5658 #endif // #if VMA_STATS_STRING_ENABLED
5659 
5660 VmaBlockVector::VmaBlockVector(
5661  VmaAllocator hAllocator,
5662  uint32_t memoryTypeIndex,
5663  VkDeviceSize preferredBlockSize,
5664  size_t minBlockCount,
5665  size_t maxBlockCount,
5666  VkDeviceSize bufferImageGranularity,
5667  uint32_t frameInUseCount,
5668  bool isCustomPool) :
5669  m_hAllocator(hAllocator),
5670  m_MemoryTypeIndex(memoryTypeIndex),
5671  m_PreferredBlockSize(preferredBlockSize),
5672  m_MinBlockCount(minBlockCount),
5673  m_MaxBlockCount(maxBlockCount),
5674  m_BufferImageGranularity(bufferImageGranularity),
5675  m_FrameInUseCount(frameInUseCount),
5676  m_IsCustomPool(isCustomPool),
5677  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5678  m_HasEmptyBlock(false),
5679  m_pDefragmentator(VMA_NULL)
5680 {
5681 }
5682 
5683 VmaBlockVector::~VmaBlockVector()
5684 {
5685  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5686 
5687  for(size_t i = m_Blocks.size(); i--; )
5688  {
5689  m_Blocks[i]->Destroy(m_hAllocator);
5690  vma_delete(m_hAllocator, m_Blocks[i]);
5691  }
5692 }
5693 
5694 VkResult VmaBlockVector::CreateMinBlocks()
5695 {
5696  for(size_t i = 0; i < m_MinBlockCount; ++i)
5697  {
5698  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5699  if(res != VK_SUCCESS)
5700  {
5701  return res;
5702  }
5703  }
5704  return VK_SUCCESS;
5705 }
5706 
5707 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5708 {
5709  pStats->size = 0;
5710  pStats->unusedSize = 0;
5711  pStats->allocationCount = 0;
5712  pStats->unusedRangeCount = 0;
5713  pStats->unusedRangeSizeMax = 0;
5714 
5715  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5716 
5717  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5718  {
5719  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5720  VMA_ASSERT(pBlock);
5721  VMA_HEAVY_ASSERT(pBlock->Validate());
5722  pBlock->m_Metadata.AddPoolStats(*pStats);
5723  }
5724 }
5725 
5726 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5727 
5728 VkResult VmaBlockVector::Allocate(
5729  VmaPool hCurrentPool,
5730  uint32_t currentFrameIndex,
5731  const VkMemoryRequirements& vkMemReq,
5732  const VmaAllocationCreateInfo& createInfo,
5733  VmaSuballocationType suballocType,
5734  VmaAllocation* pAllocation)
5735 {
5736  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
5737  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
5738 
5739  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5740 
5741  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5742  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5743  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5744  {
5745  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5746  VMA_ASSERT(pCurrBlock);
5747  VmaAllocationRequest currRequest = {};
5748  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5749  currentFrameIndex,
5750  m_FrameInUseCount,
5751  m_BufferImageGranularity,
5752  vkMemReq.size,
5753  vkMemReq.alignment,
5754  suballocType,
5755  false, // canMakeOtherLost
5756  &currRequest))
5757  {
5758  // Allocate from pCurrBlock.
5759  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5760 
5761  if(mapped)
5762  {
5763  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
5764  if(res != VK_SUCCESS)
5765  {
5766  return res;
5767  }
5768  }
5769 
5770  // We no longer have an empty block.
5771  if(pCurrBlock->m_Metadata.IsEmpty())
5772  {
5773  m_HasEmptyBlock = false;
5774  }
5775 
5776  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5777  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5778  (*pAllocation)->InitBlockAllocation(
5779  hCurrentPool,
5780  pCurrBlock,
5781  currRequest.offset,
5782  vkMemReq.alignment,
5783  vkMemReq.size,
5784  suballocType,
5785  mapped,
5786  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5787  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5788  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5789  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5790  return VK_SUCCESS;
5791  }
5792  }
5793 
5794  const bool canCreateNewBlock =
5795  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5796  (m_Blocks.size() < m_MaxBlockCount);
5797 
5798  // 2. Try to create new block.
5799  if(canCreateNewBlock)
5800  {
5801  // 2.1. Start with full preferredBlockSize.
5802  VkDeviceSize blockSize = m_PreferredBlockSize;
5803  size_t newBlockIndex = 0;
5804  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5805  // Allocating blocks of other sizes is allowed only in default pools.
5806  // In custom pools block size is fixed.
5807  if(res < 0 && m_IsCustomPool == false)
5808  {
5809  // 2.2. Try half the size.
5810  blockSize /= 2;
5811  if(blockSize >= vkMemReq.size)
5812  {
5813  res = CreateBlock(blockSize, &newBlockIndex);
5814  if(res < 0)
5815  {
5816  // 2.3. Try quarter the size.
5817  blockSize /= 2;
5818  if(blockSize >= vkMemReq.size)
5819  {
5820  res = CreateBlock(blockSize, &newBlockIndex);
5821  }
5822  }
5823  }
5824  }
5825  if(res == VK_SUCCESS)
5826  {
5827  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5828  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5829 
5830  if(mapped)
5831  {
5832  res = pBlock->Map(m_hAllocator, nullptr);
5833  if(res != VK_SUCCESS)
5834  {
5835  return res;
5836  }
5837  }
5838 
5839  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5840  VmaAllocationRequest allocRequest;
5841  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5842  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5843  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5844  (*pAllocation)->InitBlockAllocation(
5845  hCurrentPool,
5846  pBlock,
5847  allocRequest.offset,
5848  vkMemReq.alignment,
5849  vkMemReq.size,
5850  suballocType,
5851  mapped,
5852  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5853  VMA_HEAVY_ASSERT(pBlock->Validate());
5854  VMA_DEBUG_LOG(" Created new allocation Size=%llu", blockSize);
5855  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5856  return VK_SUCCESS;
5857  }
5858  }
5859 
5860  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5861 
5862  // 3. Try to allocate from existing blocks with making other allocations lost.
5863  if(canMakeOtherLost)
5864  {
5865  uint32_t tryIndex = 0;
5866  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5867  {
5868  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5869  VmaAllocationRequest bestRequest = {};
5870  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5871 
5872  // 1. Search existing allocations.
5873  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5874  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5875  {
5876  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5877  VMA_ASSERT(pCurrBlock);
5878  VmaAllocationRequest currRequest = {};
5879  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5880  currentFrameIndex,
5881  m_FrameInUseCount,
5882  m_BufferImageGranularity,
5883  vkMemReq.size,
5884  vkMemReq.alignment,
5885  suballocType,
5886  canMakeOtherLost,
5887  &currRequest))
5888  {
5889  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5890  if(pBestRequestBlock == VMA_NULL ||
5891  currRequestCost < bestRequestCost)
5892  {
5893  pBestRequestBlock = pCurrBlock;
5894  bestRequest = currRequest;
5895  bestRequestCost = currRequestCost;
5896 
5897  if(bestRequestCost == 0)
5898  {
5899  break;
5900  }
5901  }
5902  }
5903  }
5904 
5905  if(pBestRequestBlock != VMA_NULL)
5906  {
5907  if(mapped)
5908  {
5909  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
5910  if(res != VK_SUCCESS)
5911  {
5912  return res;
5913  }
5914  }
5915 
5916  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5917  currentFrameIndex,
5918  m_FrameInUseCount,
5919  &bestRequest))
5920  {
5921  // We no longer have an empty block.
5922  if(pBestRequestBlock->m_Metadata.IsEmpty())
5923  {
5924  m_HasEmptyBlock = false;
5925  }
5926  // Allocate from pBestRequestBlock.
5927  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5928  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5929  (*pAllocation)->InitBlockAllocation(
5930  hCurrentPool,
5931  pBestRequestBlock,
5932  bestRequest.offset,
5933  vkMemReq.alignment,
5934  vkMemReq.size,
5935  suballocType,
5936  mapped,
5937  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5938  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5939  VMA_DEBUG_LOG(" Returned from existing allocation");
5940  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5941  return VK_SUCCESS;
5942  }
5943  // else: Some allocations must have been touched while we were here. Next try.
5944  }
5945  else
5946  {
5947  // Could not find place in any of the blocks - break outer loop.
5948  break;
5949  }
5950  }
5951  /* Maximum number of tries exceeded - a very unlikely event when many other
5952  threads are simultaneously touching allocations, making it impossible to make
5953  them lost at the same time as we try to allocate. */
5954  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5955  {
5956  return VK_ERROR_TOO_MANY_OBJECTS;
5957  }
5958  }
5959 
5960  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5961 }
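// Summary of the strategy above: (1) try to sub-allocate from an existing
// block without disturbing anything; (2) otherwise create a new block, falling
// back from the preferred size to half and then a quarter of it (default pools
// only, since custom pools have a fixed block size); (3) if
// VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT was set, repeatedly pick the
// cheapest request that sacrifices lost-enabled allocations, giving up with
// VK_ERROR_TOO_MANY_OBJECTS after VMA_ALLOCATION_TRY_COUNT races with other
// threads.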
5962 
5963 void VmaBlockVector::Free(
5964  VmaAllocation hAllocation)
5965 {
5966  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5967 
5968  // Scope for lock.
5969  {
5970  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5971 
5972  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5973 
5974  if(hAllocation->IsPersistentMap())
5975  {
5976  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
5977  }
5978 
5979  pBlock->m_Metadata.Free(hAllocation);
5980  VMA_HEAVY_ASSERT(pBlock->Validate());
5981 
5982  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5983 
5984  // pBlock became empty after this deallocation.
5985  if(pBlock->m_Metadata.IsEmpty())
5986  {
5987  // We already have an empty block. We don't want two, so delete this one.
5988  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5989  {
5990  pBlockToDelete = pBlock;
5991  Remove(pBlock);
5992  }
5993  // We now have our first empty block.
5994  else
5995  {
5996  m_HasEmptyBlock = true;
5997  }
5998  }
5999  // pBlock didn't become empty, but we have another empty block - find and free that one.
6000  // (This is optional, just a heuristic.)
6001  else if(m_HasEmptyBlock)
6002  {
6003  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6004  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6005  {
6006  pBlockToDelete = pLastBlock;
6007  m_Blocks.pop_back();
6008  m_HasEmptyBlock = false;
6009  }
6010  }
6011 
6012  IncrementallySortBlocks();
6013  }
6014 
6015  // Destruction of a free block. Deferred until this point, outside of the mutex
6016  // lock, for performance reasons.
6017  if(pBlockToDelete != VMA_NULL)
6018  {
6019  VMA_DEBUG_LOG(" Deleted empty block");
6020  pBlockToDelete->Destroy(m_hAllocator);
6021  vma_delete(m_hAllocator, pBlockToDelete);
6022  }
6023 }
6024 
6025 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6026 {
6027  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6028  {
6029  if(m_Blocks[blockIndex] == pBlock)
6030  {
6031  VmaVectorRemove(m_Blocks, blockIndex);
6032  return;
6033  }
6034  }
6035  VMA_ASSERT(0);
6036 }
6037 
6038 void VmaBlockVector::IncrementallySortBlocks()
6039 {
6040  // Bubble sort only until first swap.
6041  for(size_t i = 1; i < m_Blocks.size(); ++i)
6042  {
6043  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6044  {
6045  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6046  return;
6047  }
6048  }
6049 }
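// Performing at most one swap per call keeps each invocation O(n) and
// amortizes the sort across many Free() calls: a single deallocation changes
// the free size of only one block, so the vector stays nearly sorted by
// ascending sum of free space.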
6050 
6051 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6052 {
6053  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6054  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6055  allocInfo.allocationSize = blockSize;
6056  VkDeviceMemory mem = VK_NULL_HANDLE;
6057  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6058  if(res < 0)
6059  {
6060  return res;
6061  }
6062 
6063  // New VkDeviceMemory successfully created.
6064 
6065  // Create a new block object for it.
6066  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6067  pBlock->Init(
6068  m_MemoryTypeIndex,
6069  mem,
6070  allocInfo.allocationSize);
6071 
6072  m_Blocks.push_back(pBlock);
6073  if(pNewBlockIndex != VMA_NULL)
6074  {
6075  *pNewBlockIndex = m_Blocks.size() - 1;
6076  }
6077 
6078  return VK_SUCCESS;
6079 }
6080 
6081 #if VMA_STATS_STRING_ENABLED
6082 
6083 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6084 {
6085  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6086 
6087  json.BeginObject();
6088 
6089  if(m_IsCustomPool)
6090  {
6091  json.WriteString("MemoryTypeIndex");
6092  json.WriteNumber(m_MemoryTypeIndex);
6093 
6094  json.WriteString("BlockSize");
6095  json.WriteNumber(m_PreferredBlockSize);
6096 
6097  json.WriteString("BlockCount");
6098  json.BeginObject(true);
6099  if(m_MinBlockCount > 0)
6100  {
6101  json.WriteString("Min");
6102  json.WriteNumber(m_MinBlockCount);
6103  }
6104  if(m_MaxBlockCount < SIZE_MAX)
6105  {
6106  json.WriteString("Max");
6107  json.WriteNumber(m_MaxBlockCount);
6108  }
6109  json.WriteString("Cur");
6110  json.WriteNumber(m_Blocks.size());
6111  json.EndObject();
6112 
6113  if(m_FrameInUseCount > 0)
6114  {
6115  json.WriteString("FrameInUseCount");
6116  json.WriteNumber(m_FrameInUseCount);
6117  }
6118  }
6119  else
6120  {
6121  json.WriteString("PreferredBlockSize");
6122  json.WriteNumber(m_PreferredBlockSize);
6123  }
6124 
6125  json.WriteString("Blocks");
6126  json.BeginArray();
6127  for(size_t i = 0; i < m_Blocks.size(); ++i)
6128  {
6129  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6130  }
6131  json.EndArray();
6132 
6133  json.EndObject();
6134 }
6135 
6136 #endif // #if VMA_STATS_STRING_ENABLED
6137 
6138 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6139  VmaAllocator hAllocator,
6140  uint32_t currentFrameIndex)
6141 {
6142  if(m_pDefragmentator == VMA_NULL)
6143  {
6144  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6145  hAllocator,
6146  this,
6147  currentFrameIndex);
6148  }
6149 
6150  return m_pDefragmentator;
6151 }
6152 
6153 VkResult VmaBlockVector::Defragment(
6154  VmaDefragmentationStats* pDefragmentationStats,
6155  VkDeviceSize& maxBytesToMove,
6156  uint32_t& maxAllocationsToMove)
6157 {
6158  if(m_pDefragmentator == VMA_NULL)
6159  {
6160  return VK_SUCCESS;
6161  }
6162 
6163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6164 
6165  // Defragment.
6166  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6167 
6168  // Accumulate statistics.
6169  if(pDefragmentationStats != VMA_NULL)
6170  {
6171  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6172  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6173  pDefragmentationStats->bytesMoved += bytesMoved;
6174  pDefragmentationStats->allocationsMoved += allocationsMoved;
6175  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6176  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6177  maxBytesToMove -= bytesMoved;
6178  maxAllocationsToMove -= allocationsMoved;
6179  }
6180 
6181  // Free empty blocks.
6182  m_HasEmptyBlock = false;
6183  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6184  {
6185  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6186  if(pBlock->m_Metadata.IsEmpty())
6187  {
6188  if(m_Blocks.size() > m_MinBlockCount)
6189  {
6190  if(pDefragmentationStats != VMA_NULL)
6191  {
6192  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6193  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6194  }
6195 
6196  VmaVectorRemove(m_Blocks, blockIndex);
6197  pBlock->Destroy(m_hAllocator);
6198  vma_delete(m_hAllocator, pBlock);
6199  }
6200  else
6201  {
6202  m_HasEmptyBlock = true;
6203  }
6204  }
6205  }
6206 
6207  return result;
6208 }
6209 
6210 void VmaBlockVector::DestroyDefragmentator()
6211 {
6212  if(m_pDefragmentator != VMA_NULL)
6213  {
6214  vma_delete(m_hAllocator, m_pDefragmentator);
6215  m_pDefragmentator = VMA_NULL;
6216  }
6217 }
6218 
6219 void VmaBlockVector::MakePoolAllocationsLost(
6220  uint32_t currentFrameIndex,
6221  size_t* pLostAllocationCount)
6222 {
6223  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6224 
6225  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6226  {
6227  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6228  VMA_ASSERT(pBlock);
6229  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6230  }
6231 }
6232 
6233 void VmaBlockVector::AddStats(VmaStats* pStats)
6234 {
6235  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6236  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6237 
6238  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6239 
6240  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6241  {
6242  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6243  VMA_ASSERT(pBlock);
6244  VMA_HEAVY_ASSERT(pBlock->Validate());
6245  VmaStatInfo allocationStatInfo;
6246  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6247  VmaAddStatInfo(pStats->total, allocationStatInfo);
6248  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6249  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6250  }
6251 }
6252 
6253 ////////////////////////////////////////////////////////////////////////////////
6254 // VmaDefragmentator members definition
6255 
6256 VmaDefragmentator::VmaDefragmentator(
6257  VmaAllocator hAllocator,
6258  VmaBlockVector* pBlockVector,
6259  uint32_t currentFrameIndex) :
6260  m_hAllocator(hAllocator),
6261  m_pBlockVector(pBlockVector),
6262  m_CurrentFrameIndex(currentFrameIndex),
6263  m_BytesMoved(0),
6264  m_AllocationsMoved(0),
6265  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6266  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6267 {
6268 }
6269 
6270 VmaDefragmentator::~VmaDefragmentator()
6271 {
6272  for(size_t i = m_Blocks.size(); i--; )
6273  {
6274  vma_delete(m_hAllocator, m_Blocks[i]);
6275  }
6276 }
6277 
6278 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6279 {
6280  AllocationInfo allocInfo;
6281  allocInfo.m_hAllocation = hAlloc;
6282  allocInfo.m_pChanged = pChanged;
6283  m_Allocations.push_back(allocInfo);
6284 }
6285 
6286 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6287 {
6288  // It has already been mapped for defragmentation.
6289  if(m_pMappedDataForDefragmentation)
6290  {
6291  *ppMappedData = m_pMappedDataForDefragmentation;
6292  return VK_SUCCESS;
6293  }
6294 
6295  // It is already persistently mapped (outside of defragmentation).
6296  if(m_pBlock->m_Mapping.GetMappedData())
6297  {
6298  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6299  return VK_SUCCESS;
6300  }
6301 
6302  // Map on first usage.
6303  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
6304  *ppMappedData = m_pMappedDataForDefragmentation;
6305  return res;
6306 }
6307 
6308 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6309 {
6310  if(m_pMappedDataForDefragmentation != VMA_NULL)
6311  {
6312  m_pBlock->Unmap(hAllocator);
6313  }
6314 }
6315 
6316 VkResult VmaDefragmentator::DefragmentRound(
6317  VkDeviceSize maxBytesToMove,
6318  uint32_t maxAllocationsToMove)
6319 {
6320  if(m_Blocks.empty())
6321  {
6322  return VK_SUCCESS;
6323  }
6324 
6325  size_t srcBlockIndex = m_Blocks.size() - 1;
6326  size_t srcAllocIndex = SIZE_MAX;
6327  for(;;)
6328  {
6329  // 1. Find next allocation to move.
6330  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6331  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6332  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6333  {
6334  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6335  {
6336  // Finished: no more allocations to process.
6337  if(srcBlockIndex == 0)
6338  {
6339  return VK_SUCCESS;
6340  }
6341  else
6342  {
6343  --srcBlockIndex;
6344  srcAllocIndex = SIZE_MAX;
6345  }
6346  }
6347  else
6348  {
6349  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6350  }
6351  }
6352 
6353  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6354  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6355 
6356  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6357  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6358  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6359  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6360 
6361  // 2. Try to find new place for this allocation in preceding or current block.
6362  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6363  {
6364  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6365  VmaAllocationRequest dstAllocRequest;
6366  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6367  m_CurrentFrameIndex,
6368  m_pBlockVector->GetFrameInUseCount(),
6369  m_pBlockVector->GetBufferImageGranularity(),
6370  size,
6371  alignment,
6372  suballocType,
6373  false, // canMakeOtherLost
6374  &dstAllocRequest) &&
6375  MoveMakesSense(
6376  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6377  {
6378  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6379 
6380  // Reached limit on number of allocations or bytes to move.
6381  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6382  (m_BytesMoved + size > maxBytesToMove))
6383  {
6384  return VK_INCOMPLETE;
6385  }
6386 
6387  void* pDstMappedData = VMA_NULL;
6388  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6389  if(res != VK_SUCCESS)
6390  {
6391  return res;
6392  }
6393 
6394  void* pSrcMappedData = VMA_NULL;
6395  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6396  if(res != VK_SUCCESS)
6397  {
6398  return res;
6399  }
6400 
6401  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6402  memcpy(
6403  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6404  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6405  static_cast<size_t>(size));
6406 
6407  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6408  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6409 
6410  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6411 
6412  if(allocInfo.m_pChanged != VMA_NULL)
6413  {
6414  *allocInfo.m_pChanged = VK_TRUE;
6415  }
6416 
6417  ++m_AllocationsMoved;
6418  m_BytesMoved += size;
6419 
6420  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6421 
6422  break;
6423  }
6424  }
6425 
6426  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6427 
6428  if(srcAllocIndex > 0)
6429  {
6430  --srcAllocIndex;
6431  }
6432  else
6433  {
6434  if(srcBlockIndex > 0)
6435  {
6436  --srcBlockIndex;
6437  srcAllocIndex = SIZE_MAX;
6438  }
6439  else
6440  {
6441  return VK_SUCCESS;
6442  }
6443  }
6444  }
6445 }
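// The round above walks candidate moves starting from the most "source" block
// and, within each block, from the back of its allocation list (the list is
// sorted largest-first, so the smallest allocations are tried first). Each
// allocation is re-placed into the earliest block, or earlier offset, that
// accepts it, and the round stops with VK_INCOMPLETE as soon as either move
// budget would be exceeded.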
6446 
6447 VkResult VmaDefragmentator::Defragment(
6448  VkDeviceSize maxBytesToMove,
6449  uint32_t maxAllocationsToMove)
6450 {
6451  if(m_Allocations.empty())
6452  {
6453  return VK_SUCCESS;
6454  }
6455 
6456  // Create block info for each block.
6457  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6458  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6459  {
6460  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6461  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6462  m_Blocks.push_back(pBlockInfo);
6463  }
6464 
6465  // Sort them by m_pBlock pointer value.
6466  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6467 
6468  // Distribute allocation infos from m_Allocations into the m_Allocations of their owning blocks in m_Blocks.
6469  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6470  {
6471  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6472  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
6473  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6474  {
6475  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6476  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6477  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6478  {
6479  (*it)->m_Allocations.push_back(allocInfo);
6480  }
6481  else
6482  {
6483  VMA_ASSERT(0);
6484  }
6485  }
6486  }
6487  m_Allocations.clear();
6488 
6489  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6490  {
6491  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6492  pBlockInfo->CalcHasNonMovableAllocations();
6493  pBlockInfo->SortAllocationsBySizeDescecnding();
6494  }
6495 
6496  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6497  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6498 
6499  // Execute defragmentation rounds (the main part).
6500  VkResult result = VK_SUCCESS;
6501  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6502  {
6503  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6504  }
6505 
6506  // Unmap blocks that were mapped for defragmentation.
6507  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6508  {
6509  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6510  }
6511 
6512  return result;
6513 }
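// A minimal usage sketch of the public entry point that drives this class
// (illustrative; allocator, allocations and ALLOC_COUNT are caller-side names,
// not library symbols):
//
//     VkBool32 changed[ALLOC_COUNT] = {};
//     VmaDefragmentationStats stats = {};
//     VkResult res = vmaDefragment(
//         allocator,
//         allocations, ALLOC_COUNT,
//         changed,
//         VMA_NULL, // default VmaDefragmentationInfo: no limits on moves
//         &stats);
//     // For every i where changed[i] == VK_TRUE, the allocation received a new
//     // block/offset, so buffers or images bound to it must be recreated.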
6514 
6515 bool VmaDefragmentator::MoveMakesSense(
6516  size_t dstBlockIndex, VkDeviceSize dstOffset,
6517  size_t srcBlockIndex, VkDeviceSize srcOffset)
6518 {
6519  if(dstBlockIndex < srcBlockIndex)
6520  {
6521  return true;
6522  }
6523  if(dstBlockIndex > srcBlockIndex)
6524  {
6525  return false;
6526  }
6527  if(dstOffset < srcOffset)
6528  {
6529  return true;
6530  }
6531  return false;
6532 }
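// In effect this defines a strict "forward" ordering: a move makes sense only
// if it transports data to a lower block index, or to a lower offset within
// the same block. E.g. (dstBlockIndex 0, dstOffset 4096) is a valid
// destination for (srcBlockIndex 2, srcOffset 0), but never the reverse.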
6533 
6534 ////////////////////////////////////////////////////////////////////////////////
6535 // VmaAllocator_T
6536 
6537 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6538  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6539  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6540  m_PhysicalDevice(pCreateInfo->physicalDevice),
6541  m_hDevice(pCreateInfo->device),
6542  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6543  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6544  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6545  m_PreferredLargeHeapBlockSize(0),
6546  m_PreferredSmallHeapBlockSize(0),
6547  m_CurrentFrameIndex(0),
6548  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6549 {
6550  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6551 
6552  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6553  memset(&m_MemProps, 0, sizeof(m_MemProps));
6554  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6555 
6556  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6557  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6558 
6559  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6560  {
6561  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6562  }
6563 
6564  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6565  {
6566  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6567  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6568  }
6569 
6570  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6571 
6572  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6573  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6574 
6575  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6576  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6577  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6578  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6579 
6580  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6581  {
6582  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6583  {
6584  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6585  if(limit != VK_WHOLE_SIZE)
6586  {
6587  m_HeapSizeLimit[heapIndex] = limit;
6588  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6589  {
6590  m_MemProps.memoryHeaps[heapIndex].size = limit;
6591  }
6592  }
6593  }
6594  }
6595 
6596  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6597  {
6598  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6599 
6600  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
6601  this,
6602  memTypeIndex,
6603  preferredBlockSize,
6604  0,
6605  SIZE_MAX,
6606  GetBufferImageGranularity(),
6607  pCreateInfo->frameInUseCount,
6608  false); // isCustomPool
6609  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
6610  // because minBlockCount is 0.
6611  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6612  }
6613 }
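// A minimal sketch of the pHeapSizeLimit feature handled above (illustrative;
// physicalDevice and device are assumed to have been created elsewhere): cap
// heap 0 at 256 MiB and leave all other heaps unlimited.
//
//     VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
//     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//     {
//         heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit.
//     }
//     heapSizeLimit[0] = 256ull * 1024 * 1024;
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//     allocatorInfo.pHeapSizeLimit = heapSizeLimit;
//
//     VmaAllocator allocator = VK_NULL_HANDLE;
//     vmaCreateAllocator(&allocatorInfo, &allocator);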
6614 
6615 VmaAllocator_T::~VmaAllocator_T()
6616 {
6617  VMA_ASSERT(m_Pools.empty());
6618 
6619  for(size_t i = GetMemoryTypeCount(); i--; )
6620  {
6621  vma_delete(this, m_pDedicatedAllocations[i]);
6622  vma_delete(this, m_pBlockVectors[i]);
6623  }
6624 }
6625 
6626 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6627 {
6628 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6629  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6630  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6631  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6632  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6633  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6634  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6635  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6636  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6637  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6638  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6639  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6640  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6641  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6642  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6643  // Ignoring vkGetBufferMemoryRequirements2KHR.
6644  // Ignoring vkGetImageMemoryRequirements2KHR.
6645 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6646 
6647 #define VMA_COPY_IF_NOT_NULL(funcName) \
6648  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6649 
6650  if(pVulkanFunctions != VMA_NULL)
6651  {
6652  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6653  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6654  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6655  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6656  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6657  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6658  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6659  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6660  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6661  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6662  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6663  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6664  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6665  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6666  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6667  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6668  }
6669 
6670 #undef VMA_COPY_IF_NOT_NULL
6671 
6672  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6673  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6674  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6675  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6676  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6677  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6678  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6679  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6680  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6681  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6682  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6683  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6684  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6685  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6686  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6687  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6688  if(m_UseKhrDedicatedAllocation)
6689  {
6690  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6691  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6692  }
6693 }
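// A sketch of supplying the function pointers manually, for builds with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0 (illustrative; only two members
// shown - every remaining member must be assigned the same way, e.g. from a
// statically linked loader or via vkGetDeviceProcAddr, or the asserts above
// will fire):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
//     vulkanFunctions.vkFreeMemory = &vkFreeMemory;
//     // ... assign the rest ...
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;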
6694 
6695 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6696 {
6697  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6698  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6699  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6700  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6701 }
6702 
6703 VkResult VmaAllocator_T::AllocateMemoryOfType(
6704  const VkMemoryRequirements& vkMemReq,
6705  bool dedicatedAllocation,
6706  VkBuffer dedicatedBuffer,
6707  VkImage dedicatedImage,
6708  const VmaAllocationCreateInfo& createInfo,
6709  uint32_t memTypeIndex,
6710  VmaSuballocationType suballocType,
6711  VmaAllocation* pAllocation)
6712 {
6713  VMA_ASSERT(pAllocation != VMA_NULL);
6714  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6715 
6716  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6717 
6718  // If memory type is not HOST_VISIBLE, disable MAPPED.
6719  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6720  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6721  {
6722  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
6723  }
6724 
6725  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
6726  VMA_ASSERT(blockVector);
6727 
6728  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6729  bool preferDedicatedMemory =
6730  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6731  dedicatedAllocation ||
6732  // Heuristic: Allocate dedicated memory if requested size is greater than half of the preferred block size.
6733  vkMemReq.size > preferredBlockSize / 2;
6734 
6735  if(preferDedicatedMemory &&
6736  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6737  finalCreateInfo.pool == VK_NULL_HANDLE)
6738  {
6739  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6740  }
6741 
6742  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6743  {
6744  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6745  {
6746  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6747  }
6748  else
6749  {
6750  return AllocateDedicatedMemory(
6751  vkMemReq.size,
6752  suballocType,
6753  memTypeIndex,
6754  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6755  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6756  finalCreateInfo.pUserData,
6757  dedicatedBuffer,
6758  dedicatedImage,
6759  pAllocation);
6760  }
6761  }
6762  else
6763  {
6764  VkResult res = blockVector->Allocate(
6765  VK_NULL_HANDLE, // hCurrentPool
6766  m_CurrentFrameIndex.load(),
6767  vkMemReq,
6768  finalCreateInfo,
6769  suballocType,
6770  pAllocation);
6771  if(res == VK_SUCCESS)
6772  {
6773  return res;
6774  }
6775 
6776  // Fall back to dedicated memory.
6777  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6778  {
6779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6780  }
6781  else
6782  {
6783  res = AllocateDedicatedMemory(
6784  vkMemReq.size,
6785  suballocType,
6786  memTypeIndex,
6787  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6788  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6789  finalCreateInfo.pUserData,
6790  dedicatedBuffer,
6791  dedicatedImage,
6792  pAllocation);
6793  if(res == VK_SUCCESS)
6794  {
6795  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
6796  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6797  return VK_SUCCESS;
6798  }
6799  else
6800  {
6801  // Everything failed: Return error code.
6802  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6803  return res;
6804  }
6805  }
6806  }
6807 }
6808 
6809 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6810  VkDeviceSize size,
6811  VmaSuballocationType suballocType,
6812  uint32_t memTypeIndex,
6813  bool map,
6814  bool isUserDataString,
6815  void* pUserData,
6816  VkBuffer dedicatedBuffer,
6817  VkImage dedicatedImage,
6818  VmaAllocation* pAllocation)
6819 {
6820  VMA_ASSERT(pAllocation);
6821 
6822  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6823  allocInfo.memoryTypeIndex = memTypeIndex;
6824  allocInfo.allocationSize = size;
6825 
6826  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6827  if(m_UseKhrDedicatedAllocation)
6828  {
6829  if(dedicatedBuffer != VK_NULL_HANDLE)
6830  {
6831  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
6832  dedicatedAllocInfo.buffer = dedicatedBuffer;
6833  allocInfo.pNext = &dedicatedAllocInfo;
6834  }
6835  else if(dedicatedImage != VK_NULL_HANDLE)
6836  {
6837  dedicatedAllocInfo.image = dedicatedImage;
6838  allocInfo.pNext = &dedicatedAllocInfo;
6839  }
6840  }
6841 
6842  // Allocate VkDeviceMemory.
6843  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6844  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6845  if(res < 0)
6846  {
6847  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6848  return res;
6849  }
6850 
6851  void* pMappedData = nullptr;
6852  if(map)
6853  {
6854  res = (*m_VulkanFunctions.vkMapMemory)(
6855  m_hDevice,
6856  hMemory,
6857  0,
6858  VK_WHOLE_SIZE,
6859  0,
6860  &pMappedData);
6861  if(res < 0)
6862  {
6863  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6864  FreeVulkanMemory(memTypeIndex, size, hMemory);
6865  return res;
6866  }
6867  }
6868 
6869  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
6870  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
6871  (*pAllocation)->SetUserData(this, pUserData);
6872 
6873  // Register it in m_pDedicatedAllocations.
6874  {
6875  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6876  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
6877  VMA_ASSERT(pDedicatedAllocations);
6878  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
6879  }
6880 
6881  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
6882 
6883  return VK_SUCCESS;
6884 }
6885 
6886 void VmaAllocator_T::GetBufferMemoryRequirements(
6887  VkBuffer hBuffer,
6888  VkMemoryRequirements& memReq,
6889  bool& requiresDedicatedAllocation,
6890  bool& prefersDedicatedAllocation) const
6891 {
6892  if(m_UseKhrDedicatedAllocation)
6893  {
6894  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
6895  memReqInfo.buffer = hBuffer;
6896 
6897  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6898 
6899  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6900  memReq2.pNext = &memDedicatedReq;
6901 
6902  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6903 
6904  memReq = memReq2.memoryRequirements;
6905  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6906  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6907  }
6908  else
6909  {
6910  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
6911  requiresDedicatedAllocation = false;
6912  prefersDedicatedAllocation = false;
6913  }
6914 }
6915 
6916 void VmaAllocator_T::GetImageMemoryRequirements(
6917  VkImage hImage,
6918  VkMemoryRequirements& memReq,
6919  bool& requiresDedicatedAllocation,
6920  bool& prefersDedicatedAllocation) const
6921 {
6922  if(m_UseKhrDedicatedAllocation)
6923  {
6924  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
6925  memReqInfo.image = hImage;
6926 
6927  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6928 
6929  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6930  memReq2.pNext = &memDedicatedReq;
6931 
6932  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6933 
6934  memReq = memReq2.memoryRequirements;
6935  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6936  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6937  }
6938  else
6939  {
6940  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
6941  requiresDedicatedAllocation = false;
6942  prefersDedicatedAllocation = false;
6943  }
6944 }
6945 
6946 VkResult VmaAllocator_T::AllocateMemory(
6947  const VkMemoryRequirements& vkMemReq,
6948  bool requiresDedicatedAllocation,
6949  bool prefersDedicatedAllocation,
6950  VkBuffer dedicatedBuffer,
6951  VkImage dedicatedImage,
6952  const VmaAllocationCreateInfo& createInfo,
6953  VmaSuballocationType suballocType,
6954  VmaAllocation* pAllocation)
6955 {
6956  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
6957  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6958  {
6959  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6960  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6961  }
6962  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6963  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
6964  {
6965  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
6966  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6967  }
6968  if(requiresDedicatedAllocation)
6969  {
6970  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6971  {
6972  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
6973  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6974  }
6975  if(createInfo.pool != VK_NULL_HANDLE)
6976  {
6977  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
6978  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6979  }
6980  }
6981  if((createInfo.pool != VK_NULL_HANDLE) &&
6982  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
6983  {
6984  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
6985  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6986  }
6987 
6988  if(createInfo.pool != VK_NULL_HANDLE)
6989  {
6990  return createInfo.pool->m_BlockVector.Allocate(
6991  createInfo.pool,
6992  m_CurrentFrameIndex.load(),
6993  vkMemReq,
6994  createInfo,
6995  suballocType,
6996  pAllocation);
6997  }
6998  else
6999  {
7000  // Bit mask of Vulkan memory types acceptable for this allocation.
7001  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7002  uint32_t memTypeIndex = UINT32_MAX;
7003  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7004  if(res == VK_SUCCESS)
7005  {
7006  res = AllocateMemoryOfType(
7007  vkMemReq,
7008  requiresDedicatedAllocation || prefersDedicatedAllocation,
7009  dedicatedBuffer,
7010  dedicatedImage,
7011  createInfo,
7012  memTypeIndex,
7013  suballocType,
7014  pAllocation);
7015  // Succeeded on first try.
7016  if(res == VK_SUCCESS)
7017  {
7018  return res;
7019  }
7020  // Allocation from this memory type failed. Try other compatible memory types.
7021  else
7022  {
7023  for(;;)
7024  {
7025  // Remove old memTypeIndex from list of possibilities.
7026  memoryTypeBits &= ~(1u << memTypeIndex);
7027  // Find alternative memTypeIndex.
7028  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7029  if(res == VK_SUCCESS)
7030  {
7031  res = AllocateMemoryOfType(
7032  vkMemReq,
7033  requiresDedicatedAllocation || prefersDedicatedAllocation,
7034  dedicatedBuffer,
7035  dedicatedImage,
7036  createInfo,
7037  memTypeIndex,
7038  suballocType,
7039  pAllocation);
7040  // Allocation from this alternative memory type succeeded.
7041  if(res == VK_SUCCESS)
7042  {
7043  return res;
7044  }
7045  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7046  }
7047  // No other matching memory type index could be found.
7048  else
7049  {
7050  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7051  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7052  }
7053  }
7054  }
7055  }
7056  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7057  else
7058  return res;
7059  }
7060 }
7061 
7062 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7063 {
7064  VMA_ASSERT(allocation);
7065 
7066  if(allocation->CanBecomeLost() == false ||
7067  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7068  {
7069  switch(allocation->GetType())
7070  {
7071  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7072  {
7073  VmaBlockVector* pBlockVector = VMA_NULL;
7074  VmaPool hPool = allocation->GetPool();
7075  if(hPool != VK_NULL_HANDLE)
7076  {
7077  pBlockVector = &hPool->m_BlockVector;
7078  }
7079  else
7080  {
7081  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7082  pBlockVector = m_pBlockVectors[memTypeIndex];
7083  }
7084  pBlockVector->Free(allocation);
7085  }
7086  break;
7087  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7088  FreeDedicatedMemory(allocation);
7089  break;
7090  default:
7091  VMA_ASSERT(0);
7092  }
7093  }
7094 
7095  allocation->SetUserData(this, VMA_NULL);
7096  vma_delete(this, allocation);
7097 }
7098 
7099 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7100 {
7101  // Initialize.
7102  InitStatInfo(pStats->total);
7103  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7104  InitStatInfo(pStats->memoryType[i]);
7105  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7106  InitStatInfo(pStats->memoryHeap[i]);
7107 
7108  // Process default pools.
7109  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7110  {
7111  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7112  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7113  VMA_ASSERT(pBlockVector);
7114  pBlockVector->AddStats(pStats);
7115  }
7116 
7117  // Process custom pools.
7118  {
7119  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7120  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7121  {
7122  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7123  }
7124  }
7125 
7126  // Process dedicated allocations.
7127  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7128  {
7129  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7130  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7131  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7132  VMA_ASSERT(pDedicatedAllocVector);
7133  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7134  {
7135  VmaStatInfo allocationStatInfo;
7136  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7137  VmaAddStatInfo(pStats->total, allocationStatInfo);
7138  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7139  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7140  }
7141  }
7142 
7143  // Postprocess.
7144  VmaPostprocessCalcStatInfo(pStats->total);
7145  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7146  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7147  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7148  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7149 }
7150 
7151 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7152 
7153 VkResult VmaAllocator_T::Defragment(
7154  VmaAllocation* pAllocations,
7155  size_t allocationCount,
7156  VkBool32* pAllocationsChanged,
7157  const VmaDefragmentationInfo* pDefragmentationInfo,
7158  VmaDefragmentationStats* pDefragmentationStats)
7159 {
7160  if(pAllocationsChanged != VMA_NULL)
7161  {
7162  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7163  }
7164  if(pDefragmentationStats != VMA_NULL)
7165  {
7166  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7167  }
7168 
7169  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7170 
7171  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7172 
7173  const size_t poolCount = m_Pools.size();
7174 
7175  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7176  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7177  {
7178  VmaAllocation hAlloc = pAllocations[allocIndex];
7179  VMA_ASSERT(hAlloc);
7180  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7181  // DedicatedAlloc cannot be defragmented.
7182  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7183  // Only HOST_VISIBLE memory types can be defragmented.
7184  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7185  // Lost allocation cannot be defragmented.
7186  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7187  {
7188  VmaBlockVector* pAllocBlockVector = nullptr;
7189 
7190  const VmaPool hAllocPool = hAlloc->GetPool();
7191  // This allocation belongs to custom pool.
7192  if(hAllocPool != VK_NULL_HANDLE)
7193  {
7194  pAllocBlockVector = &hAllocPool->GetBlockVector();
7195  }
7196  // This allocation belongs to general pool.
7197  else
7198  {
7199  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7200  }
7201 
7202  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7203 
7204  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7205  &pAllocationsChanged[allocIndex] : VMA_NULL;
7206  pDefragmentator->AddAllocation(hAlloc, pChanged);
7207  }
7208  }
7209 
7210  VkResult result = VK_SUCCESS;
7211 
7212  // ======== Main processing.
7213 
7214  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
7215  uint32_t maxAllocationsToMove = UINT32_MAX;
7216  if(pDefragmentationInfo != VMA_NULL)
7217  {
7218  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7219  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7220  }
7221 
7222  // Process standard memory.
7223  for(uint32_t memTypeIndex = 0;
7224  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7225  ++memTypeIndex)
7226  {
7227  // Only HOST_VISIBLE memory types can be defragmented.
7228  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7229  {
7230  result = m_pBlockVectors[memTypeIndex]->Defragment(
7231  pDefragmentationStats,
7232  maxBytesToMove,
7233  maxAllocationsToMove);
7234  }
7235  }
7236 
7237  // Process custom pools.
7238  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7239  {
7240  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7241  pDefragmentationStats,
7242  maxBytesToMove,
7243  maxAllocationsToMove);
7244  }
7245 
7246  // ======== Destroy defragmentators.
7247 
7248  // Process custom pools.
7249  for(size_t poolIndex = poolCount; poolIndex--; )
7250  {
7251  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7252  }
7253 
7254  // Process standard memory.
7255  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7256  {
7257  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7258  {
7259  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7260  }
7261  }
7262 
7263  return result;
7264 }
7265 
7266 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7267 {
7268  if(hAllocation->CanBecomeLost())
7269  {
7270  /*
7271  Warning: This is a carefully designed lock-free algorithm that atomically bumps the allocation's last-use frame index.
7272  Do not modify unless you really know what you're doing :)
7273  */
7274  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7275  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7276  for(;;)
7277  {
7278  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7279  {
7280  pAllocationInfo->memoryType = UINT32_MAX;
7281  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7282  pAllocationInfo->offset = 0;
7283  pAllocationInfo->size = hAllocation->GetSize();
7284  pAllocationInfo->pMappedData = VMA_NULL;
7285  pAllocationInfo->pUserData = hAllocation->GetUserData();
7286  return;
7287  }
7288  else if(localLastUseFrameIndex == localCurrFrameIndex)
7289  {
7290  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7291  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7292  pAllocationInfo->offset = hAllocation->GetOffset();
7293  pAllocationInfo->size = hAllocation->GetSize();
7294  pAllocationInfo->pMappedData = VMA_NULL;
7295  pAllocationInfo->pUserData = hAllocation->GetUserData();
7296  return;
7297  }
7298  else // Last use time earlier than current time.
7299  {
7300  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7301  {
7302  localLastUseFrameIndex = localCurrFrameIndex;
7303  }
7304  }
7305  }
7306  }
7307  else
7308  {
7309  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7310  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7311  pAllocationInfo->offset = hAllocation->GetOffset();
7312  pAllocationInfo->size = hAllocation->GetSize();
7313  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7314  pAllocationInfo->pUserData = hAllocation->GetUserData();
7315  }
7316 }
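// The loop above is essentially an atomic "touch": it bumps the allocation's
// last-use frame index to the current frame, retrying on contention, unless the
// allocation has already been lost. The same compare-exchange pattern in
// isolation (a sketch; these names are illustrative, not part of the library):
//
//     std::atomic<uint32_t> lastUseFrameIndex;
//     uint32_t expected = lastUseFrameIndex.load();
//     while(expected != VMA_FRAME_INDEX_LOST && expected != currFrameIndex)
//     {
//         // On failure, compare_exchange_weak reloads `expected` and we retry.
//         if(lastUseFrameIndex.compare_exchange_weak(expected, currFrameIndex))
//             expected = currFrameIndex; // Success: loop condition now exits.
//     }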
7317 
7318 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7319 {
7320  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7321 
7322  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7323 
7324  if(newCreateInfo.maxBlockCount == 0)
7325  {
7326  newCreateInfo.maxBlockCount = SIZE_MAX;
7327  }
7328  if(newCreateInfo.blockSize == 0)
7329  {
7330  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7331  }
7332 
7333  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7334 
7335  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7336  if(res != VK_SUCCESS)
7337  {
7338  vma_delete(this, *pPool);
7339  *pPool = VMA_NULL;
7340  return res;
7341  }
7342 
7343  // Add to m_Pools.
7344  {
7345  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7346  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7347  }
7348 
7349  return VK_SUCCESS;
7350 }
7351 
7352 void VmaAllocator_T::DestroyPool(VmaPool pool)
7353 {
7354  // Remove from m_Pools.
7355  {
7356  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7357  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7358  VMA_ASSERT(success && "Pool not found in Allocator.");
7359  }
7360 
7361  vma_delete(this, pool);
7362 }
7363 
7364 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7365 {
7366  pool->m_BlockVector.GetPoolStats(pPoolStats);
7367 }
7368 
7369 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7370 {
7371  m_CurrentFrameIndex.store(frameIndex);
7372 }
7373 
7374 void VmaAllocator_T::MakePoolAllocationsLost(
7375  VmaPool hPool,
7376  size_t* pLostAllocationCount)
7377 {
7378  hPool->m_BlockVector.MakePoolAllocationsLost(
7379  m_CurrentFrameIndex.load(),
7380  pLostAllocationCount);
7381 }
7382 
7383 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7384 {
7385  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7386  (*pAllocation)->InitLost();
7387 }
7388 
7389 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7390 {
7391  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7392 
7393  VkResult res;
7394  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7395  {
7396  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7397  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7398  {
7399  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7400  if(res == VK_SUCCESS)
7401  {
7402  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7403  }
7404  }
7405  else
7406  {
7407  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7408  }
7409  }
7410  else
7411  {
7412  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7413  }
7414 
7415  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7416  {
7417  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7418  }
7419 
7420  return res;
7421 }
7422 
7423 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7424 {
7425  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7426  {
7427  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7428  }
7429 
7430  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7431 
7432  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7433  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7434  {
7435  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7436  m_HeapSizeLimit[heapIndex] += size;
7437  }
7438 }
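// The two functions above implement the bookkeeping behind
// VmaAllocatorCreateInfo::pHeapSizeLimit. A minimal usage sketch (the 1 GiB cap
// on heap 0 is illustrative only):
//
//     VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
//     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//         heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit on that heap.
//     heapSizeLimit[0] = 1024ull * 1024 * 1024; // Cap heap 0 at 1 GiB.
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
//     allocatorInfo.device = device;                 // assumed to exist
//     allocatorInfo.pHeapSizeLimit = heapSizeLimit;
//
//     VmaAllocator allocator = VK_NULL_HANDLE;
//     vmaCreateAllocator(&allocatorInfo, &allocator);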
7439 
7440 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7441 {
7442  if(hAllocation->CanBecomeLost())
7443  {
7444  return VK_ERROR_MEMORY_MAP_FAILED;
7445  }
7446 
7447  switch(hAllocation->GetType())
7448  {
7449  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7450  {
7451  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7452  char *pBytes = nullptr;
7453  VkResult res = pBlock->Map(this, (void**)&pBytes);
7454  if(res == VK_SUCCESS)
7455  {
7456  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7457  }
7458  return res;
7459  }
7460  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7461  return hAllocation->DedicatedAllocMap(this, ppData);
7462  default:
7463  VMA_ASSERT(0);
7464  return VK_ERROR_MEMORY_MAP_FAILED;
7465  }
7466 }
7467 
7468 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7469 {
7470  switch(hAllocation->GetType())
7471  {
7472  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7473  {
7474  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7475  pBlock->Unmap(this);
7476  }
7477  break;
7478  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7479  hAllocation->DedicatedAllocUnmap(this);
7480  break;
7481  default:
7482  VMA_ASSERT(0);
7483  }
7484 }
7485 
7486 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7487 {
7488  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7489 
7490  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7491  {
7492  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7493  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7494  VMA_ASSERT(pDedicatedAllocations);
7495  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7496  VMA_ASSERT(success);
7497  }
7498 
7499  VkDeviceMemory hMemory = allocation->GetMemory();
7500 
7501  if(allocation->GetMappedData() != VMA_NULL)
7502  {
7503  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7504  }
7505 
7506  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7507 
7508  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7509 }
7510 
7511 #if VMA_STATS_STRING_ENABLED
7512 
7513 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7514 {
7515  bool dedicatedAllocationsStarted = false;
7516  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7517  {
7518  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7519  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7520  VMA_ASSERT(pDedicatedAllocVector);
7521  if(pDedicatedAllocVector->empty() == false)
7522  {
7523  if(dedicatedAllocationsStarted == false)
7524  {
7525  dedicatedAllocationsStarted = true;
7526  json.WriteString("DedicatedAllocations");
7527  json.BeginObject();
7528  }
7529 
7530  json.BeginString("Type ");
7531  json.ContinueString(memTypeIndex);
7532  json.EndString();
7533 
7534  json.BeginArray();
7535 
7536  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7537  {
7538  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7539  json.BeginObject(true);
7540 
7541  json.WriteString("Size");
7542  json.WriteNumber(hAlloc->GetSize());
7543 
7544  json.WriteString("Type");
7545  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7546 
7547  json.EndObject();
7548  }
7549 
7550  json.EndArray();
7551  }
7552  }
7553  if(dedicatedAllocationsStarted)
7554  {
7555  json.EndObject();
7556  }
7557 
7558  {
7559  bool allocationsStarted = false;
7560  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7561  {
7562  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
7563  {
7564  if(allocationsStarted == false)
7565  {
7566  allocationsStarted = true;
7567  json.WriteString("DefaultPools");
7568  json.BeginObject();
7569  }
7570 
7571  json.BeginString("Type ");
7572  json.ContinueString(memTypeIndex);
7573  json.EndString();
7574 
7575  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
7576  }
7577  }
7578  if(allocationsStarted)
7579  {
7580  json.EndObject();
7581  }
7582  }
7583 
7584  {
7585  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7586  const size_t poolCount = m_Pools.size();
7587  if(poolCount > 0)
7588  {
7589  json.WriteString("Pools");
7590  json.BeginArray();
7591  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7592  {
7593  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7594  }
7595  json.EndArray();
7596  }
7597  }
7598 }
7599 
7600 #endif // #if VMA_STATS_STRING_ENABLED
7601 
7602 static VkResult AllocateMemoryForImage(
7603  VmaAllocator allocator,
7604  VkImage image,
7605  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7606  VmaSuballocationType suballocType,
7607  VmaAllocation* pAllocation)
7608 {
7609  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7610 
7611  VkMemoryRequirements vkMemReq = {};
7612  bool requiresDedicatedAllocation = false;
7613  bool prefersDedicatedAllocation = false;
7614  allocator->GetImageMemoryRequirements(image, vkMemReq,
7615  requiresDedicatedAllocation, prefersDedicatedAllocation);
7616 
7617  return allocator->AllocateMemory(
7618  vkMemReq,
7619  requiresDedicatedAllocation,
7620  prefersDedicatedAllocation,
7621  VK_NULL_HANDLE, // dedicatedBuffer
7622  image, // dedicatedImage
7623  *pAllocationCreateInfo,
7624  suballocType,
7625  pAllocation);
7626 }
7627 
7629 // Public interface
7630 
7631 VkResult vmaCreateAllocator(
7632  const VmaAllocatorCreateInfo* pCreateInfo,
7633  VmaAllocator* pAllocator)
7634 {
7635  VMA_ASSERT(pCreateInfo && pAllocator);
7636  VMA_DEBUG_LOG("vmaCreateAllocator");
7637  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7638  return VK_SUCCESS;
7639 }
7640 
7641 void vmaDestroyAllocator(
7642  VmaAllocator allocator)
7643 {
7644  if(allocator != VK_NULL_HANDLE)
7645  {
7646  VMA_DEBUG_LOG("vmaDestroyAllocator");
7647  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7648  vma_delete(&allocationCallbacks, allocator);
7649  }
7650 }
7651 
7652 void vmaGetPhysicalDeviceProperties(
7653  VmaAllocator allocator,
7654  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7655 {
7656  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7657  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7658 }
7659 
7660 void vmaGetMemoryProperties(
7661  VmaAllocator allocator,
7662  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7663 {
7664  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7665  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7666 }
7667 
7668 void vmaGetMemoryTypeProperties(
7669  VmaAllocator allocator,
7670  uint32_t memoryTypeIndex,
7671  VkMemoryPropertyFlags* pFlags)
7672 {
7673  VMA_ASSERT(allocator && pFlags);
7674  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7675  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7676 }
7677 
7678 void vmaSetCurrentFrameIndex(
7679  VmaAllocator allocator,
7680  uint32_t frameIndex)
7681 {
7682  VMA_ASSERT(allocator);
7683  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7684 
7685  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7686 
7687  allocator->SetCurrentFrameIndex(frameIndex);
7688 }
7689 
7690 void vmaCalculateStats(
7691  VmaAllocator allocator,
7692  VmaStats* pStats)
7693 {
7694  VMA_ASSERT(allocator && pStats);
7695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7696  allocator->CalculateStats(pStats);
7697 }
7698 
7699 #if VMA_STATS_STRING_ENABLED
7700 
7701 void vmaBuildStatsString(
7702  VmaAllocator allocator,
7703  char** ppStatsString,
7704  VkBool32 detailedMap)
7705 {
7706  VMA_ASSERT(allocator && ppStatsString);
7707  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7708 
7709  VmaStringBuilder sb(allocator);
7710  {
7711  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7712  json.BeginObject();
7713 
7714  VmaStats stats;
7715  allocator->CalculateStats(&stats);
7716 
7717  json.WriteString("Total");
7718  VmaPrintStatInfo(json, stats.total);
7719 
7720  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7721  {
7722  json.BeginString("Heap ");
7723  json.ContinueString(heapIndex);
7724  json.EndString();
7725  json.BeginObject();
7726 
7727  json.WriteString("Size");
7728  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7729 
7730  json.WriteString("Flags");
7731  json.BeginArray(true);
7732  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7733  {
7734  json.WriteString("DEVICE_LOCAL");
7735  }
7736  json.EndArray();
7737 
7738  if(stats.memoryHeap[heapIndex].blockCount > 0)
7739  {
7740  json.WriteString("Stats");
7741  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7742  }
7743 
7744  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7745  {
7746  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7747  {
7748  json.BeginString("Type ");
7749  json.ContinueString(typeIndex);
7750  json.EndString();
7751 
7752  json.BeginObject();
7753 
7754  json.WriteString("Flags");
7755  json.BeginArray(true);
7756  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7757  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7758  {
7759  json.WriteString("DEVICE_LOCAL");
7760  }
7761  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7762  {
7763  json.WriteString("HOST_VISIBLE");
7764  }
7765  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7766  {
7767  json.WriteString("HOST_COHERENT");
7768  }
7769  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7770  {
7771  json.WriteString("HOST_CACHED");
7772  }
7773  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7774  {
7775  json.WriteString("LAZILY_ALLOCATED");
7776  }
7777  json.EndArray();
7778 
7779  if(stats.memoryType[typeIndex].blockCount > 0)
7780  {
7781  json.WriteString("Stats");
7782  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7783  }
7784 
7785  json.EndObject();
7786  }
7787  }
7788 
7789  json.EndObject();
7790  }
7791  if(detailedMap == VK_TRUE)
7792  {
7793  allocator->PrintDetailedMap(json);
7794  }
7795 
7796  json.EndObject();
7797  }
7798 
7799  const size_t len = sb.GetLength();
7800  char* const pChars = vma_new_array(allocator, char, len + 1);
7801  if(len > 0)
7802  {
7803  memcpy(pChars, sb.GetData(), len);
7804  }
7805  pChars[len] = '\0';
7806  *ppStatsString = pChars;
7807 }
7808 
7809 void vmaFreeStatsString(
7810  VmaAllocator allocator,
7811  char* pStatsString)
7812 {
7813  if(pStatsString != VMA_NULL)
7814  {
7815  VMA_ASSERT(allocator);
7816  size_t len = strlen(pStatsString);
7817  vma_delete_array(allocator, pStatsString, len + 1);
7818  }
7819 }
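// Usage sketch for the pair above; the returned string is JSON and must be
// released with vmaFreeStatsString:
//
//     char* statsString = VMA_NULL;
//     vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
//     printf("%s\n", statsString);
//     vmaFreeStatsString(allocator, statsString);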
7820 
7821 #endif // #if VMA_STATS_STRING_ENABLED
7822 
7825 VkResult vmaFindMemoryTypeIndex(
7826  VmaAllocator allocator,
7827  uint32_t memoryTypeBits,
7828  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7829  uint32_t* pMemoryTypeIndex)
7830 {
7831  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7832  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7833  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7834 
7835  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7836  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7837  if(preferredFlags == 0)
7838  {
7839  preferredFlags = requiredFlags;
7840  }
7841  // preferredFlags, if not 0, must be a superset of requiredFlags.
7842  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7843 
7844  // Convert usage to requiredFlags and preferredFlags.
7845  switch(pAllocationCreateInfo->usage)
7846  {
7847  case VMA_MEMORY_USAGE_UNKNOWN:
7848  break;
7849  case VMA_MEMORY_USAGE_GPU_ONLY:
7850  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7851  break;
7852  case VMA_MEMORY_USAGE_CPU_ONLY:
7853  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7854  break;
7855  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7856  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7857  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7858  break;
7859  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7860  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7861  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7862  break;
7863  default:
7864  break;
7865  }
7866 
7867  *pMemoryTypeIndex = UINT32_MAX;
7868  uint32_t minCost = UINT32_MAX;
7869  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7870  memTypeIndex < allocator->GetMemoryTypeCount();
7871  ++memTypeIndex, memTypeBit <<= 1)
7872  {
7873  // This memory type is acceptable according to memoryTypeBits bitmask.
7874  if((memTypeBit & memoryTypeBits) != 0)
7875  {
7876  const VkMemoryPropertyFlags currFlags =
7877  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7878  // This memory type contains requiredFlags.
7879  if((requiredFlags & ~currFlags) == 0)
7880  {
7881  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7882  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7883  // Remember memory type with lowest cost.
7884  if(currCost < minCost)
7885  {
7886  *pMemoryTypeIndex = memTypeIndex;
7887  if(currCost == 0)
7888  {
7889  return VK_SUCCESS;
7890  }
7891  minCost = currCost;
7892  }
7893  }
7894  }
7895  }
7896  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7897 }
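// Usage sketch for the function above (memoryTypeBits would normally come from
// vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements; the flags are
// illustrative, and preferredFlags must be a superset of requiredFlags):
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
//     allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
//         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
//
//     uint32_t memTypeIndex = UINT32_MAX;
//     VkResult res = vmaFindMemoryTypeIndex(
//         allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
//     // res == VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.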
7898 
7899 VkResult vmaCreatePool(
7900  VmaAllocator allocator,
7901  const VmaPoolCreateInfo* pCreateInfo,
7902  VmaPool* pPool)
7903 {
7904  VMA_ASSERT(allocator && pCreateInfo && pPool);
7905 
7906  VMA_DEBUG_LOG("vmaCreatePool");
7907 
7908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7909 
7910  return allocator->CreatePool(pCreateInfo, pPool);
7911 }
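// Usage sketch (memTypeIndex as found e.g. by vmaFindMemoryTypeIndex; leaving
// blockSize and maxBlockCount at zero picks the defaults applied in
// VmaAllocator_T::CreatePool above):
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//
//     VmaPool pool = VK_NULL_HANDLE;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//     // ... allocate from it by setting VmaAllocationCreateInfo::pool ...
//     vmaDestroyPool(allocator, pool);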
7912 
7913 void vmaDestroyPool(
7914  VmaAllocator allocator,
7915  VmaPool pool)
7916 {
7917  VMA_ASSERT(allocator);
7918 
7919  if(pool == VK_NULL_HANDLE)
7920  {
7921  return;
7922  }
7923 
7924  VMA_DEBUG_LOG("vmaDestroyPool");
7925 
7926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7927 
7928  allocator->DestroyPool(pool);
7929 }
7930 
7931 void vmaGetPoolStats(
7932  VmaAllocator allocator,
7933  VmaPool pool,
7934  VmaPoolStats* pPoolStats)
7935 {
7936  VMA_ASSERT(allocator && pool && pPoolStats);
7937 
7938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7939 
7940  allocator->GetPoolStats(pool, pPoolStats);
7941 }
7942 
7943 void vmaMakePoolAllocationsLost(
7944  VmaAllocator allocator,
7945  VmaPool pool,
7946  size_t* pLostAllocationCount)
7947 {
7948  VMA_ASSERT(allocator && pool);
7949 
7950  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7951 
7952  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7953 }
7954 
7955 VkResult vmaAllocateMemory(
7956  VmaAllocator allocator,
7957  const VkMemoryRequirements* pVkMemoryRequirements,
7958  const VmaAllocationCreateInfo* pCreateInfo,
7959  VmaAllocation* pAllocation,
7960  VmaAllocationInfo* pAllocationInfo)
7961 {
7962  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7963 
7964  VMA_DEBUG_LOG("vmaAllocateMemory");
7965 
7966  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7967 
7968  VkResult result = allocator->AllocateMemory(
7969  *pVkMemoryRequirements,
7970  false, // requiresDedicatedAllocation
7971  false, // prefersDedicatedAllocation
7972  VK_NULL_HANDLE, // dedicatedBuffer
7973  VK_NULL_HANDLE, // dedicatedImage
7974  *pCreateInfo,
7975  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7976  pAllocation);
7977 
7978  if(pAllocationInfo && result == VK_SUCCESS)
7979  {
7980  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7981  }
7982 
7983  return result;
7984 }
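// Usage sketch; a real VkMemoryRequirements would come from a buffer or image,
// the values here are illustrative:
//
//     VkMemoryRequirements memReq = {};
//     memReq.size = 65536;
//     memReq.alignment = 256;
//     memReq.memoryTypeBits = UINT32_MAX; // any memory type acceptable
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
//
//     VmaAllocation allocation = VK_NULL_HANDLE;
//     VmaAllocationInfo allocInfo = {};
//     vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
//     // ...
//     vmaFreeMemory(allocator, allocation);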
7985 
7986 VkResult vmaAllocateMemoryForBuffer(
7987  VmaAllocator allocator,
7988  VkBuffer buffer,
7989  const VmaAllocationCreateInfo* pCreateInfo,
7990  VmaAllocation* pAllocation,
7991  VmaAllocationInfo* pAllocationInfo)
7992 {
7993  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7994 
7995  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7996 
7997  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7998 
7999  VkMemoryRequirements vkMemReq = {};
8000  bool requiresDedicatedAllocation = false;
8001  bool prefersDedicatedAllocation = false;
8002  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8003  requiresDedicatedAllocation,
8004  prefersDedicatedAllocation);
8005 
8006  VkResult result = allocator->AllocateMemory(
8007  vkMemReq,
8008  requiresDedicatedAllocation,
8009  prefersDedicatedAllocation,
8010  buffer, // dedicatedBuffer
8011  VK_NULL_HANDLE, // dedicatedImage
8012  *pCreateInfo,
8013  VMA_SUBALLOCATION_TYPE_BUFFER,
8014  pAllocation);
8015 
8016  if(pAllocationInfo && result == VK_SUCCESS)
8017  {
8018  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8019  }
8020 
8021  return result;
8022 }
8023 
8024 VkResult vmaAllocateMemoryForImage(
8025  VmaAllocator allocator,
8026  VkImage image,
8027  const VmaAllocationCreateInfo* pCreateInfo,
8028  VmaAllocation* pAllocation,
8029  VmaAllocationInfo* pAllocationInfo)
8030 {
8031  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8032 
8033  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8034 
8035  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8036 
8037  VkResult result = AllocateMemoryForImage(
8038  allocator,
8039  image,
8040  pCreateInfo,
8041  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8042  pAllocation);
8043 
8044  if(pAllocationInfo && result == VK_SUCCESS)
8045  {
8046  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8047  }
8048 
8049  return result;
8050 }
8051 
8052 void vmaFreeMemory(
8053  VmaAllocator allocator,
8054  VmaAllocation allocation)
8055 {
8056  VMA_ASSERT(allocator && allocation);
8057 
8058  VMA_DEBUG_LOG("vmaFreeMemory");
8059 
8060  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8061 
8062  allocator->FreeMemory(allocation);
8063 }
8064 
8065 void vmaGetAllocationInfo(
8066  VmaAllocator allocator,
8067  VmaAllocation allocation,
8068  VmaAllocationInfo* pAllocationInfo)
8069 {
8070  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8071 
8072  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8073 
8074  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8075 }
8076 
8077 void vmaSetAllocationUserData(
8078  VmaAllocator allocator,
8079  VmaAllocation allocation,
8080  void* pUserData)
8081 {
8082  VMA_ASSERT(allocator && allocation);
8083 
8084  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8085 
8086  allocation->SetUserData(allocator, pUserData);
8087 }
8088 
8089 void vmaCreateLostAllocation(
8090  VmaAllocator allocator,
8091  VmaAllocation* pAllocation)
8092 {
8093  VMA_ASSERT(allocator && pAllocation);
8094 
8095  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8096 
8097  allocator->CreateLostAllocation(pAllocation);
8098 }
8099 
8100 VkResult vmaMapMemory(
8101  VmaAllocator allocator,
8102  VmaAllocation allocation,
8103  void** ppData)
8104 {
8105  VMA_ASSERT(allocator && allocation && ppData);
8106 
8107  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8108 
8109  return allocator->Map(allocation, ppData);
8110 }
8111 
8112 void vmaUnmapMemory(
8113  VmaAllocator allocator,
8114  VmaAllocation allocation)
8115 {
8116  VMA_ASSERT(allocator && allocation);
8117 
8118  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8119 
8120  allocator->Unmap(allocation);
8121 }
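// Usage sketch: Map/Unmap calls must be balanced and the allocation must live
// in HOST_VISIBLE memory (srcData/srcSize are assumed to exist):
//
//     void* pData = VMA_NULL;
//     if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
//     {
//         memcpy(pData, srcData, srcSize); // pData already points at the allocation's offset
//         vmaUnmapMemory(allocator, allocation);
//     }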
8122 
8123 VkResult vmaDefragment(
8124  VmaAllocator allocator,
8125  VmaAllocation* pAllocations,
8126  size_t allocationCount,
8127  VkBool32* pAllocationsChanged,
8128  const VmaDefragmentationInfo *pDefragmentationInfo,
8129  VmaDefragmentationStats* pDefragmentationStats)
8130 {
8131  VMA_ASSERT(allocator && pAllocations);
8132 
8133  VMA_DEBUG_LOG("vmaDefragment");
8134 
8135  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8136 
8137  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8138 }
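// Usage sketch (allocations[] holds block allocations in HOST_VISIBLE memory,
// gathered by the application; allocCount is illustrative). After the call,
// allocationsChanged[i] != VK_FALSE marks allocations that moved, whose buffers
// or images must be re-created and re-bound by the caller:
//
//     std::vector<VkBool32> allocationsChanged(allocCount);
//     VmaDefragmentationStats defragStats = {};
//     vmaDefragment(allocator, allocations, allocCount,
//         allocationsChanged.data(), VMA_NULL, &defragStats);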
8139 
8140 VkResult vmaCreateBuffer(
8141  VmaAllocator allocator,
8142  const VkBufferCreateInfo* pBufferCreateInfo,
8143  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8144  VkBuffer* pBuffer,
8145  VmaAllocation* pAllocation,
8146  VmaAllocationInfo* pAllocationInfo)
8147 {
8148  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8149 
8150  VMA_DEBUG_LOG("vmaCreateBuffer");
8151 
8152  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8153 
8154  *pBuffer = VK_NULL_HANDLE;
8155  *pAllocation = VK_NULL_HANDLE;
8156 
8157  // 1. Create VkBuffer.
8158  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8159  allocator->m_hDevice,
8160  pBufferCreateInfo,
8161  allocator->GetAllocationCallbacks(),
8162  pBuffer);
8163  if(res >= 0)
8164  {
8165  // 2. vkGetBufferMemoryRequirements.
8166  VkMemoryRequirements vkMemReq = {};
8167  bool requiresDedicatedAllocation = false;
8168  bool prefersDedicatedAllocation = false;
8169  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8170  requiresDedicatedAllocation, prefersDedicatedAllocation);
8171 
8172  // 3. Allocate memory using allocator.
8173  res = allocator->AllocateMemory(
8174  vkMemReq,
8175  requiresDedicatedAllocation,
8176  prefersDedicatedAllocation,
8177  *pBuffer, // dedicatedBuffer
8178  VK_NULL_HANDLE, // dedicatedImage
8179  *pAllocationCreateInfo,
8180  VMA_SUBALLOCATION_TYPE_BUFFER,
8181  pAllocation);
8182  if(res >= 0)
8183  {
8184  // 4. Bind buffer with memory.
8185  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8186  allocator->m_hDevice,
8187  *pBuffer,
8188  (*pAllocation)->GetMemory(),
8189  (*pAllocation)->GetOffset());
8190  if(res >= 0)
8191  {
8192  // All steps succeeded.
8193  if(pAllocationInfo != VMA_NULL)
8194  {
8195  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8196  }
8197  return VK_SUCCESS;
8198  }
8199  allocator->FreeMemory(*pAllocation);
8200  *pAllocation = VK_NULL_HANDLE;
8201  return res;
8202  }
8203  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8204  *pBuffer = VK_NULL_HANDLE;
8205  return res;
8206  }
8207  return res;
8208 }
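// Usage sketch of the create/destroy pair (buffer parameters are illustrative):
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
//
//     VkBuffer buffer = VK_NULL_HANDLE;
//     VmaAllocation allocation = VK_NULL_HANDLE;
//     vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//         &buffer, &allocation, VMA_NULL);
//     // ...
//     vmaDestroyBuffer(allocator, buffer, allocation);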
8209 
8210 void vmaDestroyBuffer(
8211  VmaAllocator allocator,
8212  VkBuffer buffer,
8213  VmaAllocation allocation)
8214 {
8215  if(buffer != VK_NULL_HANDLE)
8216  {
8217  VMA_ASSERT(allocator);
8218 
8219  VMA_DEBUG_LOG("vmaDestroyBuffer");
8220 
8221  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8222 
8223  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8224 
8225  allocator->FreeMemory(allocation);
8226  }
8227 }
8228 
8229 VkResult vmaCreateImage(
8230  VmaAllocator allocator,
8231  const VkImageCreateInfo* pImageCreateInfo,
8232  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8233  VkImage* pImage,
8234  VmaAllocation* pAllocation,
8235  VmaAllocationInfo* pAllocationInfo)
8236 {
8237  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8238 
8239  VMA_DEBUG_LOG("vmaCreateImage");
8240 
8241  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8242 
8243  *pImage = VK_NULL_HANDLE;
8244  *pAllocation = VK_NULL_HANDLE;
8245 
8246  // 1. Create VkImage.
8247  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8248  allocator->m_hDevice,
8249  pImageCreateInfo,
8250  allocator->GetAllocationCallbacks(),
8251  pImage);
8252  if(res >= 0)
8253  {
8254  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8255  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8256  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8257 
8258  // 2. Allocate memory using allocator.
8259  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8260  if(res >= 0)
8261  {
8262  // 3. Bind image with memory.
8263  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8264  allocator->m_hDevice,
8265  *pImage,
8266  (*pAllocation)->GetMemory(),
8267  (*pAllocation)->GetOffset());
8268  if(res >= 0)
8269  {
8270  // All steps succeeded.
8271  if(pAllocationInfo != VMA_NULL)
8272  {
8273  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8274  }
8275  return VK_SUCCESS;
8276  }
8277  allocator->FreeMemory(*pAllocation);
8278  *pAllocation = VK_NULL_HANDLE;
8279  return res;
8280  }
8281  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8282  *pImage = VK_NULL_HANDLE;
8283  return res;
8284  }
8285  return res;
8286 }
8287 
8288 void vmaDestroyImage(
8289  VmaAllocator allocator,
8290  VkImage image,
8291  VmaAllocation allocation)
8292 {
8293  if(image != VK_NULL_HANDLE)
8294  {
8295  VMA_ASSERT(allocator);
8296 
8297  VMA_DEBUG_LOG("vmaDestroyImage");
8298 
8299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8300 
8301  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8302 
8303  allocator->FreeMemory(allocation);
8304  }
8305 }
8306 
8307 #endif // #ifdef VMA_IMPLEMENTATION
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
594 #include <vulkan/vulkan.h>
595 
596 VK_DEFINE_HANDLE(VmaAllocator)
597 
598 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
600  VmaAllocator allocator,
601  uint32_t memoryType,
602  VkDeviceMemory memory,
603  VkDeviceSize size);
605 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
606  VmaAllocator allocator,
607  uint32_t memoryType,
608  VkDeviceMemory memory,
609  VkDeviceSize size);
610 
618 typedef struct VmaDeviceMemoryCallbacks {
620  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
622  PFN_vmaFreeDeviceMemoryFunction pfnFree;
623 } VmaDeviceMemoryCallbacks;
624 
660 
663 typedef VkFlags VmaAllocatorCreateFlags;
664 
669 typedef struct VmaVulkanFunctions {
670  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
671  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
672  PFN_vkAllocateMemory vkAllocateMemory;
673  PFN_vkFreeMemory vkFreeMemory;
674  PFN_vkMapMemory vkMapMemory;
675  PFN_vkUnmapMemory vkUnmapMemory;
676  PFN_vkBindBufferMemory vkBindBufferMemory;
677  PFN_vkBindImageMemory vkBindImageMemory;
678  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
679  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
680  PFN_vkCreateBuffer vkCreateBuffer;
681  PFN_vkDestroyBuffer vkDestroyBuffer;
682  PFN_vkCreateImage vkCreateImage;
683  PFN_vkDestroyImage vkDestroyImage;
684  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
685  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
686 } VmaVulkanFunctions;
687 
689 typedef struct VmaAllocatorCreateInfo
690 {
692  VmaAllocatorCreateFlags flags;
694 
695  VkPhysicalDevice physicalDevice;
697 
698  VkDevice device;
700 
703 
706 
707  const VkAllocationCallbacks* pAllocationCallbacks;
709 
724  uint32_t frameInUseCount;
748  const VkDeviceSize* pHeapSizeLimit;
760  const VmaVulkanFunctions* pVulkanFunctions;
761 } VmaAllocatorCreateInfo;
762 
764 VkResult vmaCreateAllocator(
765  const VmaAllocatorCreateInfo* pCreateInfo,
766  VmaAllocator* pAllocator);
767 
769 void vmaDestroyAllocator(
770  VmaAllocator allocator);
771 
776 void vmaGetPhysicalDeviceProperties(
777  VmaAllocator allocator,
778  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
779 
784 void vmaGetMemoryProperties(
785  VmaAllocator allocator,
786  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
787 
794 void vmaGetMemoryTypeProperties(
795  VmaAllocator allocator,
796  uint32_t memoryTypeIndex,
797  VkMemoryPropertyFlags* pFlags);
798 
807 void vmaSetCurrentFrameIndex(
808  VmaAllocator allocator,
809  uint32_t frameIndex);
810 
813 typedef struct VmaStatInfo
814 {
816  uint32_t blockCount;
818  uint32_t allocationCount;
822  VkDeviceSize usedBytes;
824  VkDeviceSize unusedBytes;
825  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
826  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
827 } VmaStatInfo;
828 
830 typedef struct VmaStats
831 {
832  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
833  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
835 } VmaStats;
836 
838 void vmaCalculateStats(
839  VmaAllocator allocator,
840  VmaStats* pStats);
841 
842 #define VMA_STATS_STRING_ENABLED 1
843 
844 #if VMA_STATS_STRING_ENABLED
845 
847 
849 void vmaBuildStatsString(
850  VmaAllocator allocator,
851  char** ppStatsString,
852  VkBool32 detailedMap);
853 
854 void vmaFreeStatsString(
855  VmaAllocator allocator,
856  char* pStatsString);
857 
858 #endif // #if VMA_STATS_STRING_ENABLED
859 
860 VK_DEFINE_HANDLE(VmaPool)
861 
862 typedef enum VmaMemoryUsage
863 {
869 
872 
875 
879 
894 
944 
947 typedef VkFlags VmaAllocationCreateFlags;
948 
949 typedef struct VmaAllocationCreateInfo
950 {
952  VmaAllocationCreateFlags flags;
957  VmaMemoryUsage usage;
963  VkMemoryPropertyFlags requiredFlags;
969  VkMemoryPropertyFlags preferredFlags;
971  void* pUserData;
976  VmaPool pool;
977 } VmaAllocationCreateInfo;
978 
993 VkResult vmaFindMemoryTypeIndex(
994  VmaAllocator allocator,
995  uint32_t memoryTypeBits,
996  const VmaAllocationCreateInfo* pAllocationCreateInfo,
997  uint32_t* pMemoryTypeIndex);
998 
1019 
1022 typedef VkFlags VmaPoolCreateFlags;
1023 
1026 typedef struct VmaPoolCreateInfo {
1029  uint32_t memoryTypeIndex;
1032  VmaPoolCreateFlags flags;
1037  VkDeviceSize blockSize;
1065 } VmaPoolCreateInfo;
1066 
1069 typedef struct VmaPoolStats {
1072  VkDeviceSize size;
1075  VkDeviceSize unusedSize;
1088  VkDeviceSize unusedRangeSizeMax;
1089 } VmaPoolStats;
1090 
1097 VkResult vmaCreatePool(
1098  VmaAllocator allocator,
1099  const VmaPoolCreateInfo* pCreateInfo,
1100  VmaPool* pPool);
1101 
1104 void vmaDestroyPool(
1105  VmaAllocator allocator,
1106  VmaPool pool);
1107 
1114 void vmaGetPoolStats(
1115  VmaAllocator allocator,
1116  VmaPool pool,
1117  VmaPoolStats* pPoolStats);
1118 
1125 void vmaMakePoolAllocationsLost(
1126  VmaAllocator allocator,
1127  VmaPool pool,
1128  size_t* pLostAllocationCount);
1129 
1130 VK_DEFINE_HANDLE(VmaAllocation)
1131 
1132 
1134 typedef struct VmaAllocationInfo {
1139  uint32_t memoryType;
1148  VkDeviceMemory deviceMemory;
1153  VkDeviceSize offset;
1158  VkDeviceSize size;
1166  void* pMappedData;
1172  void* pUserData;
1173 } VmaAllocationInfo;
1174 
1185 VkResult vmaAllocateMemory(
1186  VmaAllocator allocator,
1187  const VkMemoryRequirements* pVkMemoryRequirements,
1188  const VmaAllocationCreateInfo* pCreateInfo,
1189  VmaAllocation* pAllocation,
1190  VmaAllocationInfo* pAllocationInfo);
1191 
1198 VkResult vmaAllocateMemoryForBuffer(
1199  VmaAllocator allocator,
1200  VkBuffer buffer,
1201  const VmaAllocationCreateInfo* pCreateInfo,
1202  VmaAllocation* pAllocation,
1203  VmaAllocationInfo* pAllocationInfo);
1204 
1206 VkResult vmaAllocateMemoryForImage(
1207  VmaAllocator allocator,
1208  VkImage image,
1209  const VmaAllocationCreateInfo* pCreateInfo,
1210  VmaAllocation* pAllocation,
1211  VmaAllocationInfo* pAllocationInfo);
1212 
1214 void vmaFreeMemory(
1215  VmaAllocator allocator,
1216  VmaAllocation allocation);
1217 
1219 void vmaGetAllocationInfo(
1220  VmaAllocator allocator,
1221  VmaAllocation allocation,
1222  VmaAllocationInfo* pAllocationInfo);
1223 
1237 void vmaSetAllocationUserData(
1238  VmaAllocator allocator,
1239  VmaAllocation allocation,
1240  void* pUserData);
1241 
1252 void vmaCreateLostAllocation(
1253  VmaAllocator allocator,
1254  VmaAllocation* pAllocation);
1255 
1290 VkResult vmaMapMemory(
1291  VmaAllocator allocator,
1292  VmaAllocation allocation,
1293  void** ppData);
1294 
1299 void vmaUnmapMemory(
1300  VmaAllocator allocator,
1301  VmaAllocation allocation);
1302 
1304 typedef struct VmaDefragmentationInfo {
1309  VkDeviceSize maxBytesToMove;
1314  uint32_t maxAllocationsToMove;
1315 } VmaDefragmentationInfo;
1316 
1318 typedef struct VmaDefragmentationStats {
1320  VkDeviceSize bytesMoved;
1322  VkDeviceSize bytesFreed;
1324  uint32_t allocationsMoved;
1326  uint32_t deviceMemoryBlocksFreed;
1327 } VmaDefragmentationStats;
1328 
1405 VkResult vmaDefragment(
1406  VmaAllocator allocator,
1407  VmaAllocation* pAllocations,
1408  size_t allocationCount,
1409  VkBool32* pAllocationsChanged,
1410  const VmaDefragmentationInfo *pDefragmentationInfo,
1411  VmaDefragmentationStats* pDefragmentationStats);
1412 
1439 VkResult vmaCreateBuffer(
1440  VmaAllocator allocator,
1441  const VkBufferCreateInfo* pBufferCreateInfo,
1442  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1443  VkBuffer* pBuffer,
1444  VmaAllocation* pAllocation,
1445  VmaAllocationInfo* pAllocationInfo);
1446 
1458 void vmaDestroyBuffer(
1459  VmaAllocator allocator,
1460  VkBuffer buffer,
1461  VmaAllocation allocation);
1462 
1464 VkResult vmaCreateImage(
1465  VmaAllocator allocator,
1466  const VkImageCreateInfo* pImageCreateInfo,
1467  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1468  VkImage* pImage,
1469  VmaAllocation* pAllocation,
1470  VmaAllocationInfo* pAllocationInfo);
1471 
1483 void vmaDestroyImage(
1484  VmaAllocator allocator,
1485  VkImage image,
1486  VmaAllocation allocation);
1487 
1488 #ifdef __cplusplus
1489 }
1490 #endif
1491 
1492 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1493 
1494 // For Visual Studio IntelliSense.
1495 #ifdef __INTELLISENSE__
1496 #define VMA_IMPLEMENTATION
1497 #endif
1498 
1499 #ifdef VMA_IMPLEMENTATION
1500 #undef VMA_IMPLEMENTATION
1501 
1502 #include <cstdint>
1503 #include <cstdlib>
1504 #include <cstring>
1505 
1506 /*******************************************************************************
1507 CONFIGURATION SECTION
1508 
1509 Define some of these macros before each #include of this header, or change them
1510 here, if you need behavior other than the default for your environment.
1511 */
1512 
1513 /*
1514 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1515 internally, like:
1516 
1517  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1518 
1519 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1520 VmaAllocatorCreateInfo::pVulkanFunctions.
1521 */
1522 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
1523 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1524 #endif
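// A sketch of the dynamic alternative mentioned above (pointers fetched with
// vkGetDeviceProcAddr / vkGetInstanceProcAddr; only two members shown, the rest
// of VmaVulkanFunctions is filled the same way):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkAllocateMemory =
//         (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
//     vulkanFunctions.vkFreeMemory =
//         (PFN_vkFreeMemory)vkGetDeviceProcAddr(device, "vkFreeMemory");
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;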
1525 
1526 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1527 //#define VMA_USE_STL_CONTAINERS 1
1528 
1529 /* Set this macro to 1 to make the library include and use STL containers:
1530 std::pair, std::vector, std::list, std::unordered_map.
1531 
1532 Set it to 0 or leave it undefined to make the library use its own implementation
1533 of the containers.
1534 */
1535 #if VMA_USE_STL_CONTAINERS
1536  #define VMA_USE_STL_VECTOR 1
1537  #define VMA_USE_STL_UNORDERED_MAP 1
1538  #define VMA_USE_STL_LIST 1
1539 #endif
1540 
1541 #if VMA_USE_STL_VECTOR
1542  #include <vector>
1543 #endif
1544 
1545 #if VMA_USE_STL_UNORDERED_MAP
1546  #include <unordered_map>
1547 #endif
1548 
1549 #if VMA_USE_STL_LIST
1550  #include <list>
1551 #endif
1552 
1553 /*
1554 The following headers are used in this CONFIGURATION section only, so feel free
1555 to remove them if they are not needed.
1556 */
1557 #include <cassert> // for assert
1558 #include <algorithm> // for min, max
1559 #include <mutex> // for std::mutex
1560 #include <atomic> // for std::atomic
1561 
1562 #if !defined(_WIN32)
1563  #include <malloc.h> // for aligned_alloc()
1564 #endif
1565 
1566 // Normal assert to check for programmer's errors, especially in Debug configuration.
1567 #ifndef VMA_ASSERT
1568  #ifdef _DEBUG
1569  #define VMA_ASSERT(expr) assert(expr)
1570  #else
1571  #define VMA_ASSERT(expr)
1572  #endif
1573 #endif
1574 
1575 // Assert that will be called very often, like inside data structures e.g. operator[].
1576 // Making it non-empty can make the program slow.
1577 #ifndef VMA_HEAVY_ASSERT
1578  #ifdef _DEBUG
1579  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1580  #else
1581  #define VMA_HEAVY_ASSERT(expr)
1582  #endif
1583 #endif
1584 
1585 #ifndef VMA_NULL
1586  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1587  #define VMA_NULL nullptr
1588 #endif
1589 
1590 #ifndef VMA_ALIGN_OF
1591  #define VMA_ALIGN_OF(type) (__alignof(type))
1592 #endif
1593 
1594 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1595  #if defined(_WIN32)
1596  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1597  #else
1598  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
1599  #endif
1600 #endif
1601 
1602 #ifndef VMA_SYSTEM_FREE
1603  #if defined(_WIN32)
1604  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1605  #else
1606  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1607  #endif
1608 #endif
1609 
1610 #ifndef VMA_MIN
1611  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1612 #endif
1613 
1614 #ifndef VMA_MAX
1615  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1616 #endif
1617 
1618 #ifndef VMA_SWAP
1619  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1620 #endif
1621 
1622 #ifndef VMA_SORT
1623  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1624 #endif
1625 
1626 #ifndef VMA_DEBUG_LOG
1627  #define VMA_DEBUG_LOG(format, ...)
1628  /*
1629  #define VMA_DEBUG_LOG(format, ...) do { \
1630  printf(format, __VA_ARGS__); \
1631  printf("\n"); \
1632  } while(false)
1633  */
1634 #endif
1635 
1636 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1637 #if VMA_STATS_STRING_ENABLED
1638  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1639  {
1640  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1641  }
1642  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1643  {
1644  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1645  }
1646  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1647  {
1648  snprintf(outStr, strLen, "%p", ptr);
1649  }
1650 #endif
1651 
1652 #ifndef VMA_MUTEX
1653  class VmaMutex
1654  {
1655  public:
1656  VmaMutex() { }
1657  ~VmaMutex() { }
1658  void Lock() { m_Mutex.lock(); }
1659  void Unlock() { m_Mutex.unlock(); }
1660  private:
1661  std::mutex m_Mutex;
1662  };
1663  #define VMA_MUTEX VmaMutex
1664 #endif
1665 
1666 /*
1667 If providing your own implementation, you need to implement a subset of std::atomic:
1668 
1669 - Constructor(uint32_t desired)
1670 - uint32_t load() const
1671 - void store(uint32_t desired)
1672 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1673 */
1674 #ifndef VMA_ATOMIC_UINT32
1675  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1676 #endif
1677 
1678 #ifndef VMA_BEST_FIT
1679 
1691  #define VMA_BEST_FIT (1)
1692 #endif
1693 
1694 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
1695 
1699  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
1700 #endif
1701 
1702 #ifndef VMA_DEBUG_ALIGNMENT
1703 
1707  #define VMA_DEBUG_ALIGNMENT (1)
1708 #endif
1709 
1710 #ifndef VMA_DEBUG_MARGIN
1711 
1715  #define VMA_DEBUG_MARGIN (0)
1716 #endif
1717 
1718 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1719 
1723  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1724 #endif
1725 
1726 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1727 
1731  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1732 #endif
1733 
1734 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1735  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
1737 #endif
1738 
1739 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1740  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
1742 #endif
1743 
1744 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
1745  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
1747 #endif
1748 
1749 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1750 
1751 /*******************************************************************************
1752 END OF CONFIGURATION
1753 */
1754 
1755 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1756  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1757 
1758 // Returns the number of bits set to 1 in (v), using the classic SWAR population count.
1759 static inline uint32_t CountBitsSet(uint32_t v)
1760 {
1761  uint32_t c = v - ((v >> 1) & 0x55555555); // Sum bits in 2-bit fields.
1762  c = ((c >> 2) & 0x33333333) + (c & 0x33333333); // Sum 2-bit sums into 4-bit fields.
1763  c = ((c >> 4) + c) & 0x0F0F0F0F; // Sum into 8-bit fields.
1764  c = ((c >> 8) + c) & 0x00FF00FF; // Sum into 16-bit fields.
1765  c = ((c >> 16) + c) & 0x0000FFFF; // Final sum of all 32 bits.
1766  return c;
1768 
1769 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
1770 // Use types like uint32_t, uint64_t as T.
1771 template <typename T>
1772 static inline T VmaAlignUp(T val, T align)
1773 {
1774  return (val + align - 1) / align * align;
1775 }
1776 
1777 // Division with mathematical rounding to the nearest integer.
1778 template <typename T>
1779 inline T VmaRoundDiv(T x, T y)
1780 {
1781  return (x + (y / (T)2)) / y;
1782 }
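// For example, VmaRoundDiv<uint32_t>(10, 4) == 3 and VmaRoundDiv<uint32_t>(9, 4) == 2.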
1783 
1784 #ifndef VMA_SORT
1785 
1786 template<typename Iterator, typename Compare>
1787 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
1788 {
1789  Iterator centerValue = end; --centerValue;
1790  Iterator insertIndex = beg;
1791  for(Iterator it = beg; it < centerValue; ++it)
1792  {
1793  if(cmp(*it, *centerValue))
1794  {
1795  if(insertIndex != it)
1796  {
1797  VMA_SWAP(*it, *insertIndex);
1798  }
1799  ++insertIndex;
1800  }
1801  }
1802  if(insertIndex != centerValue)
1803  {
1804  VMA_SWAP(*insertIndex, *centerValue);
1805  }
1806  return insertIndex;
1807 }
1808 
1809 template<typename Iterator, typename Compare>
1810 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1811 {
1812  if(beg < end)
1813  {
1814  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1815  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1816  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1817  }
1818 }
1819 
1820 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1821 
1822 #endif // #ifndef VMA_SORT
1823 
1824 /*
1825 Returns true if two memory blocks occupy overlapping pages.
1826 ResourceA must be at a lower memory offset than ResourceB.
1827 
1828 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1829 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1830 */
1831 static inline bool VmaBlocksOnSamePage(
1832  VkDeviceSize resourceAOffset,
1833  VkDeviceSize resourceASize,
1834  VkDeviceSize resourceBOffset,
1835  VkDeviceSize pageSize)
1836 {
1837  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1838  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1839  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1840  VkDeviceSize resourceBStart = resourceBOffset;
1841  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1842  return resourceAEndPage == resourceBStartPage;
1843 }
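/*
Worked example, assuming pageSize (bufferImageGranularity) is a power of two,
which the bit masking above requires: with pageSize = 1024,
VmaBlocksOnSamePage(0, 1000, 1020, 1024) == true (resource A ends on page 0 and
resource B starts on page 0), while
VmaBlocksOnSamePage(0, 1000, 1024, 1024) == false (resource B starts on page 1).
*/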
1844 
1845 enum VmaSuballocationType
1846 {
1847  VMA_SUBALLOCATION_TYPE_FREE = 0,
1848  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1849  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1850  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1851  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1852  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1853  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1854 };
1855 
1856 /*
1857 Returns true if given suballocation types could conflict and must respect
1858 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
1859 or a linear image and the other is an optimal image. If a type is unknown, the
1860 function behaves conservatively.
1861 */
1862 static inline bool VmaIsBufferImageGranularityConflict(
1863  VmaSuballocationType suballocType1,
1864  VmaSuballocationType suballocType2)
1865 {
1866  if(suballocType1 > suballocType2)
1867  {
1868  VMA_SWAP(suballocType1, suballocType2);
1869  }
1870 
1871  switch(suballocType1)
1872  {
1873  case VMA_SUBALLOCATION_TYPE_FREE:
1874  return false;
1875  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1876  return true;
1877  case VMA_SUBALLOCATION_TYPE_BUFFER:
1878  return
1879  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1880  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1881  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1882  return
1883  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1884  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1885  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1886  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1887  return
1888  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1889  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1890  return false;
1891  default:
1892  VMA_ASSERT(0);
1893  return true;
1894  }
1895 }
1896 
1897 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1898 struct VmaMutexLock
1899 {
1900 public:
1901  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1902  m_pMutex(useMutex ? &mutex : VMA_NULL)
1903  {
1904  if(m_pMutex)
1905  {
1906  m_pMutex->Lock();
1907  }
1908  }
1909 
1910  ~VmaMutexLock()
1911  {
1912  if(m_pMutex)
1913  {
1914  m_pMutex->Unlock();
1915  }
1916  }
1917 
1918 private:
1919  VMA_MUTEX* m_pMutex;
1920 };
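/*
Usage sketch: lock for the duration of a scope; the useMutex parameter lets
single-threaded users skip locking entirely (m_UseMutex below is a
hypothetical member):

    {
        VmaMutexLock lock(m_Mutex, m_UseMutex);
        // ...access state guarded by m_Mutex...
    } // Unlocked automatically here.
*/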
1921 
1922 #if VMA_DEBUG_GLOBAL_MUTEX
1923  static VMA_MUTEX gDebugGlobalMutex;
1924  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
1925 #else
1926  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1927 #endif
1928 
1929 // Minimum size of a free suballocation to register it in the free suballocation collection.
1930 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1931 
1932 /*
1933 Performs binary search and returns an iterator to the first element that is
1934 greater than or equal to (key), according to comparison (cmp).
1935 
1936 Cmp should return true if its first argument is less than its second argument.
1937 
1938 The returned iterator points to the found element, if it is present in the
1939 collection, or to the place where a new element with value (key) should be inserted.
1940 */
1941 template <typename IterT, typename KeyT, typename CmpT>
1942 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1943 {
1944  size_t down = 0, up = (end - beg);
1945  while(down < up)
1946  {
1947  const size_t mid = (down + up) / 2;
1948  if(cmp(*(beg+mid), key))
1949  {
1950  down = mid + 1;
1951  }
1952  else
1953  {
1954  up = mid;
1955  }
1956  }
1957  return beg + down;
1958 }
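/*
Usage sketch (lower-bound semantics):

    const uint32_t arr[] = { 1, 3, 3, 7 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 3u,
        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
    // it now points to arr[1], the first element not less than 3;
    // for key 4 it would point to arr[3] (the 7).
*/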
1959 
1961 // Memory allocation
1962 
1963 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1964 {
1965  if((pAllocationCallbacks != VMA_NULL) &&
1966  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1967  {
1968  return (*pAllocationCallbacks->pfnAllocation)(
1969  pAllocationCallbacks->pUserData,
1970  size,
1971  alignment,
1972  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1973  }
1974  else
1975  {
1976  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1977  }
1978 }
1979 
1980 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1981 {
1982  if((pAllocationCallbacks != VMA_NULL) &&
1983  (pAllocationCallbacks->pfnFree != VMA_NULL))
1984  {
1985  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1986  }
1987  else
1988  {
1989  VMA_SYSTEM_FREE(ptr);
1990  }
1991 }
1992 
1993 template<typename T>
1994 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1995 {
1996  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1997 }
1998 
1999 template<typename T>
2000 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2001 {
2002  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2003 }
2004 
2005 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2006 
2007 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
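/*
Usage sketch: the macros pair placement-new with the custom allocation
callbacks, and vma_delete / vma_delete_array (below) run the destructors and
release the memory. MyType here is a hypothetical class:

    MyType* p = vma_new(pAllocationCallbacks, MyType)();
    vma_delete(pAllocationCallbacks, p);
*/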
2008 
2009 template<typename T>
2010 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2011 {
2012  ptr->~T();
2013  VmaFree(pAllocationCallbacks, ptr);
2014 }
2015 
2016 template<typename T>
2017 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2018 {
2019  if(ptr != VMA_NULL)
2020  {
2021  for(size_t i = count; i--; )
2022  {
2023  ptr[i].~T();
2024  }
2025  VmaFree(pAllocationCallbacks, ptr);
2026  }
2027 }
2028 
2029 // STL-compatible allocator.
2030 template<typename T>
2031 class VmaStlAllocator
2032 {
2033 public:
2034  const VkAllocationCallbacks* const m_pCallbacks;
2035  typedef T value_type;
2036 
2037  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2038  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2039 
2040  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2041  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2042 
2043  template<typename U>
2044  bool operator==(const VmaStlAllocator<U>& rhs) const
2045  {
2046  return m_pCallbacks == rhs.m_pCallbacks;
2047  }
2048  template<typename U>
2049  bool operator!=(const VmaStlAllocator<U>& rhs) const
2050  {
2051  return m_pCallbacks != rhs.m_pCallbacks;
2052  }
2053 
2054  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2055 };
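/*
Usage sketch: routes container allocations through VkAllocationCallbacks
(pAllocationCallbacks stands for whatever callbacks the caller holds):

    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    v.push_back(42);
*/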
2056 
2057 #if VMA_USE_STL_VECTOR
2058 
2059 #define VmaVector std::vector
2060 
2061 template<typename T, typename allocatorT>
2062 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2063 {
2064  vec.insert(vec.begin() + index, item);
2065 }
2066 
2067 template<typename T, typename allocatorT>
2068 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2069 {
2070  vec.erase(vec.begin() + index);
2071 }
2072 
2073 #else // #if VMA_USE_STL_VECTOR
2074 
2075 /* Class with an interface compatible with a subset of std::vector.
2076 T must be POD, because constructors and destructors are not called and memcpy is
2077 used for these objects. */
2078 template<typename T, typename AllocatorT>
2079 class VmaVector
2080 {
2081 public:
2082  typedef T value_type;
2083 
2084  VmaVector(const AllocatorT& allocator) :
2085  m_Allocator(allocator),
2086  m_pArray(VMA_NULL),
2087  m_Count(0),
2088  m_Capacity(0)
2089  {
2090  }
2091 
2092  VmaVector(size_t count, const AllocatorT& allocator) :
2093  m_Allocator(allocator),
2094  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2095  m_Count(count),
2096  m_Capacity(count)
2097  {
2098  }
2099 
2100  VmaVector(const VmaVector<T, AllocatorT>& src) :
2101  m_Allocator(src.m_Allocator),
2102  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2103  m_Count(src.m_Count),
2104  m_Capacity(src.m_Count)
2105  {
2106  if(m_Count != 0)
2107  {
2108  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2109  }
2110  }
2111 
2112  ~VmaVector()
2113  {
2114  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2115  }
2116 
2117  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2118  {
2119  if(&rhs != this)
2120  {
2121  resize(rhs.m_Count);
2122  if(m_Count != 0)
2123  {
2124  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2125  }
2126  }
2127  return *this;
2128  }
2129 
2130  bool empty() const { return m_Count == 0; }
2131  size_t size() const { return m_Count; }
2132  T* data() { return m_pArray; }
2133  const T* data() const { return m_pArray; }
2134 
2135  T& operator[](size_t index)
2136  {
2137  VMA_HEAVY_ASSERT(index < m_Count);
2138  return m_pArray[index];
2139  }
2140  const T& operator[](size_t index) const
2141  {
2142  VMA_HEAVY_ASSERT(index < m_Count);
2143  return m_pArray[index];
2144  }
2145 
2146  T& front()
2147  {
2148  VMA_HEAVY_ASSERT(m_Count > 0);
2149  return m_pArray[0];
2150  }
2151  const T& front() const
2152  {
2153  VMA_HEAVY_ASSERT(m_Count > 0);
2154  return m_pArray[0];
2155  }
2156  T& back()
2157  {
2158  VMA_HEAVY_ASSERT(m_Count > 0);
2159  return m_pArray[m_Count - 1];
2160  }
2161  const T& back() const
2162  {
2163  VMA_HEAVY_ASSERT(m_Count > 0);
2164  return m_pArray[m_Count - 1];
2165  }
2166 
2167  void reserve(size_t newCapacity, bool freeMemory = false)
2168  {
2169  newCapacity = VMA_MAX(newCapacity, m_Count);
2170 
2171  if((newCapacity < m_Capacity) && !freeMemory)
2172  {
2173  newCapacity = m_Capacity;
2174  }
2175 
2176  if(newCapacity != m_Capacity)
2177  {
2178  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2179  if(m_Count != 0)
2180  {
2181  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2182  }
2183  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2184  m_Capacity = newCapacity;
2185  m_pArray = newArray;
2186  }
2187  }
2188 
2189  void resize(size_t newCount, bool freeMemory = false)
2190  {
2191  size_t newCapacity = m_Capacity;
2192  if(newCount > m_Capacity)
2193  {
2194  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2195  }
2196  else if(freeMemory)
2197  {
2198  newCapacity = newCount;
2199  }
2200 
2201  if(newCapacity != m_Capacity)
2202  {
2203  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2204  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2205  if(elementsToCopy != 0)
2206  {
2207  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2208  }
2209  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2210  m_Capacity = newCapacity;
2211  m_pArray = newArray;
2212  }
2213 
2214  m_Count = newCount;
2215  }
2216 
2217  void clear(bool freeMemory = false)
2218  {
2219  resize(0, freeMemory);
2220  }
2221 
2222  void insert(size_t index, const T& src)
2223  {
2224  VMA_HEAVY_ASSERT(index <= m_Count);
2225  const size_t oldCount = size();
2226  resize(oldCount + 1);
2227  if(index < oldCount)
2228  {
2229  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2230  }
2231  m_pArray[index] = src;
2232  }
2233 
2234  void remove(size_t index)
2235  {
2236  VMA_HEAVY_ASSERT(index < m_Count);
2237  const size_t oldCount = size();
2238  if(index < oldCount - 1)
2239  {
2240  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2241  }
2242  resize(oldCount - 1);
2243  }
2244 
2245  void push_back(const T& src)
2246  {
2247  const size_t newIndex = size();
2248  resize(newIndex + 1);
2249  m_pArray[newIndex] = src;
2250  }
2251 
2252  void pop_back()
2253  {
2254  VMA_HEAVY_ASSERT(m_Count > 0);
2255  resize(size() - 1);
2256  }
2257 
2258  void push_front(const T& src)
2259  {
2260  insert(0, src);
2261  }
2262 
2263  void pop_front()
2264  {
2265  VMA_HEAVY_ASSERT(m_Count > 0);
2266  remove(0);
2267  }
2268 
2269  typedef T* iterator;
2270 
2271  iterator begin() { return m_pArray; }
2272  iterator end() { return m_pArray + m_Count; }
2273 
2274 private:
2275  AllocatorT m_Allocator;
2276  T* m_pArray;
2277  size_t m_Count;
2278  size_t m_Capacity;
2279 };
2280 
2281 template<typename T, typename allocatorT>
2282 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2283 {
2284  vec.insert(index, item);
2285 }
2286 
2287 template<typename T, typename allocatorT>
2288 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2289 {
2290  vec.remove(index);
2291 }
2292 
2293 #endif // #if VMA_USE_STL_VECTOR
2294 
2295 template<typename CmpLess, typename VectorT>
2296 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2297 {
2298  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2299  vector.data(),
2300  vector.data() + vector.size(),
2301  value,
2302  CmpLess()) - vector.data();
2303  VmaVectorInsert(vector, indexToInsert, value);
2304  return indexToInsert;
2305 }
2306 
2307 template<typename CmpLess, typename VectorT>
2308 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2309 {
2310  CmpLess comparator;
2311  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2312  vector.begin(),
2313  vector.end(),
2314  value,
2315  comparator);
2316  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2317  {
2318  size_t indexToRemove = it - vector.begin();
2319  VmaVectorRemove(vector, indexToRemove);
2320  return true;
2321  }
2322  return false;
2323 }
2324 
2325 template<typename CmpLess, typename VectorT>
2326 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2327 {
2328  CmpLess comparator;
2329  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2330  vector.data(),
2331  vector.data() + vector.size(),
2332  value,
2333  comparator);
2334  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2335  {
2336  return it - vector.data();
2337  }
2338  else
2339  {
2340  return vector.size();
2341  }
2342 }
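/*
Usage sketch for the three sorted-vector helpers above, with a hypothetical
less-than functor:

    struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };

    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    VmaVectorInsertSorted<UintLess>(v, 7u); // Keeps v sorted.
    VmaVectorRemoveSorted<UintLess>(v, 7u); // Returns true: 7 was found and removed.
*/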
2343 
2345 // class VmaPoolAllocator
2346 
2347 /*
2348 Allocator for objects of type T using a list of arrays (pools) to speed up
2349 allocation. The number of elements that can be allocated is not bounded, because
2350 the allocator can create multiple blocks.
2351 */
2352 template<typename T>
2353 class VmaPoolAllocator
2354 {
2355 public:
2356  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2357  ~VmaPoolAllocator();
2358  void Clear();
2359  T* Alloc();
2360  void Free(T* ptr);
2361 
2362 private:
2363  union Item
2364  {
2365  uint32_t NextFreeIndex;
2366  T Value;
2367  };
2368 
2369  struct ItemBlock
2370  {
2371  Item* pItems;
2372  uint32_t FirstFreeIndex;
2373  };
2374 
2375  const VkAllocationCallbacks* m_pAllocationCallbacks;
2376  size_t m_ItemsPerBlock;
2377  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2378 
2379  ItemBlock& CreateNewBlock();
2380 };
2381 
2382 template<typename T>
2383 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2384  m_pAllocationCallbacks(pAllocationCallbacks),
2385  m_ItemsPerBlock(itemsPerBlock),
2386  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2387 {
2388  VMA_ASSERT(itemsPerBlock > 0);
2389 }
2390 
2391 template<typename T>
2392 VmaPoolAllocator<T>::~VmaPoolAllocator()
2393 {
2394  Clear();
2395 }
2396 
2397 template<typename T>
2398 void VmaPoolAllocator<T>::Clear()
2399 {
2400  for(size_t i = m_ItemBlocks.size(); i--; )
2401  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2402  m_ItemBlocks.clear();
2403 }
2404 
2405 template<typename T>
2406 T* VmaPoolAllocator<T>::Alloc()
2407 {
2408  for(size_t i = m_ItemBlocks.size(); i--; )
2409  {
2410  ItemBlock& block = m_ItemBlocks[i];
2411  // This block has some free items: use the first one.
2412  if(block.FirstFreeIndex != UINT32_MAX)
2413  {
2414  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2415  block.FirstFreeIndex = pItem->NextFreeIndex;
2416  return &pItem->Value;
2417  }
2418  }
2419 
2420  // No block has a free item: create a new one and use it.
2421  ItemBlock& newBlock = CreateNewBlock();
2422  Item* const pItem = &newBlock.pItems[0];
2423  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2424  return &pItem->Value;
2425 }
2426 
2427 template<typename T>
2428 void VmaPoolAllocator<T>::Free(T* ptr)
2429 {
2430  // Search all memory blocks to find ptr.
2431  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2432  {
2433  ItemBlock& block = m_ItemBlocks[i];
2434 
2435  // Casting to union.
2436  Item* pItemPtr;
2437  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2438 
2439  // Check if pItemPtr is in address range of this block.
2440  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2441  {
2442  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2443  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2444  block.FirstFreeIndex = index;
2445  return;
2446  }
2447  }
2448  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2449 }
2450 
2451 template<typename T>
2452 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2453 {
2454  ItemBlock newBlock = {
2455  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2456 
2457  m_ItemBlocks.push_back(newBlock);
2458 
2459  // Set up the singly-linked list of all free items in this block.
2460  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2461  newBlock.pItems[i].NextFreeIndex = i + 1;
2462  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2463  return m_ItemBlocks.back();
2464 }
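/*
Usage sketch with a hypothetical POD type Node: objects are carved out of
fixed-size blocks (here 128 items each), and freed items are recycled through
each block's singly-linked free list:

    struct Node { uint32_t key; Node* pNext; };

    VmaPoolAllocator<Node> pool(pAllocationCallbacks, 128);
    Node* n = pool.Alloc(); // May create the first block. No constructor is called for Value.
    pool.Free(n);           // Returns n to its block's free list.
*/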
2465 
2467 // class VmaRawList, VmaList
2468 
2469 #if VMA_USE_STL_LIST
2470 
2471 #define VmaList std::list
2472 
2473 #else // #if VMA_USE_STL_LIST
2474 
2475 template<typename T>
2476 struct VmaListItem
2477 {
2478  VmaListItem* pPrev;
2479  VmaListItem* pNext;
2480  T Value;
2481 };
2482 
2483 // Doubly linked list.
2484 template<typename T>
2485 class VmaRawList
2486 {
2487 public:
2488  typedef VmaListItem<T> ItemType;
2489 
2490  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2491  ~VmaRawList();
2492  void Clear();
2493 
2494  size_t GetCount() const { return m_Count; }
2495  bool IsEmpty() const { return m_Count == 0; }
2496 
2497  ItemType* Front() { return m_pFront; }
2498  const ItemType* Front() const { return m_pFront; }
2499  ItemType* Back() { return m_pBack; }
2500  const ItemType* Back() const { return m_pBack; }
2501 
2502  ItemType* PushBack();
2503  ItemType* PushFront();
2504  ItemType* PushBack(const T& value);
2505  ItemType* PushFront(const T& value);
2506  void PopBack();
2507  void PopFront();
2508 
2509  // Item can be null - it means PushBack.
2510  ItemType* InsertBefore(ItemType* pItem);
2511  // Item can be null - it means PushFront.
2512  ItemType* InsertAfter(ItemType* pItem);
2513 
2514  ItemType* InsertBefore(ItemType* pItem, const T& value);
2515  ItemType* InsertAfter(ItemType* pItem, const T& value);
2516 
2517  void Remove(ItemType* pItem);
2518 
2519 private:
2520  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2521  VmaPoolAllocator<ItemType> m_ItemAllocator;
2522  ItemType* m_pFront;
2523  ItemType* m_pBack;
2524  size_t m_Count;
2525 
2526  // Declared but not defined, to block the copy constructor and assignment operator.
2527  VmaRawList(const VmaRawList<T>& src);
2528  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2529 };
2530 
2531 template<typename T>
2532 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2533  m_pAllocationCallbacks(pAllocationCallbacks),
2534  m_ItemAllocator(pAllocationCallbacks, 128),
2535  m_pFront(VMA_NULL),
2536  m_pBack(VMA_NULL),
2537  m_Count(0)
2538 {
2539 }
2540 
2541 template<typename T>
2542 VmaRawList<T>::~VmaRawList()
2543 {
2544  // Intentionally not calling Clear, because that would perform unnecessary
2545  // computations to return all items to m_ItemAllocator as free.
2546 }
2547 
2548 template<typename T>
2549 void VmaRawList<T>::Clear()
2550 {
2551  if(IsEmpty() == false)
2552  {
2553  ItemType* pItem = m_pBack;
2554  while(pItem != VMA_NULL)
2555  {
2556  ItemType* const pPrevItem = pItem->pPrev;
2557  m_ItemAllocator.Free(pItem);
2558  pItem = pPrevItem;
2559  }
2560  m_pFront = VMA_NULL;
2561  m_pBack = VMA_NULL;
2562  m_Count = 0;
2563  }
2564 }
2565 
2566 template<typename T>
2567 VmaListItem<T>* VmaRawList<T>::PushBack()
2568 {
2569  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2570  pNewItem->pNext = VMA_NULL;
2571  if(IsEmpty())
2572  {
2573  pNewItem->pPrev = VMA_NULL;
2574  m_pFront = pNewItem;
2575  m_pBack = pNewItem;
2576  m_Count = 1;
2577  }
2578  else
2579  {
2580  pNewItem->pPrev = m_pBack;
2581  m_pBack->pNext = pNewItem;
2582  m_pBack = pNewItem;
2583  ++m_Count;
2584  }
2585  return pNewItem;
2586 }
2587 
2588 template<typename T>
2589 VmaListItem<T>* VmaRawList<T>::PushFront()
2590 {
2591  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2592  pNewItem->pPrev = VMA_NULL;
2593  if(IsEmpty())
2594  {
2595  pNewItem->pNext = VMA_NULL;
2596  m_pFront = pNewItem;
2597  m_pBack = pNewItem;
2598  m_Count = 1;
2599  }
2600  else
2601  {
2602  pNewItem->pNext = m_pFront;
2603  m_pFront->pPrev = pNewItem;
2604  m_pFront = pNewItem;
2605  ++m_Count;
2606  }
2607  return pNewItem;
2608 }
2609 
2610 template<typename T>
2611 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2612 {
2613  ItemType* const pNewItem = PushBack();
2614  pNewItem->Value = value;
2615  return pNewItem;
2616 }
2617 
2618 template<typename T>
2619 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2620 {
2621  ItemType* const pNewItem = PushFront();
2622  pNewItem->Value = value;
2623  return pNewItem;
2624 }
2625 
2626 template<typename T>
2627 void VmaRawList<T>::PopBack()
2628 {
2629  VMA_HEAVY_ASSERT(m_Count > 0);
2630  ItemType* const pBackItem = m_pBack;
2631  ItemType* const pPrevItem = pBackItem->pPrev;
2632  if(pPrevItem != VMA_NULL)
2633  {
2634  pPrevItem->pNext = VMA_NULL;
2635  }
2636  m_pBack = pPrevItem;
2637  m_ItemAllocator.Free(pBackItem);
2638  --m_Count;
2639 }
2640 
2641 template<typename T>
2642 void VmaRawList<T>::PopFront()
2643 {
2644  VMA_HEAVY_ASSERT(m_Count > 0);
2645  ItemType* const pFrontItem = m_pFront;
2646  ItemType* const pNextItem = pFrontItem->pNext;
2647  if(pNextItem != VMA_NULL)
2648  {
2649  pNextItem->pPrev = VMA_NULL;
2650  }
2651  m_pFront = pNextItem;
2652  m_ItemAllocator.Free(pFrontItem);
2653  --m_Count;
2654 }
2655 
2656 template<typename T>
2657 void VmaRawList<T>::Remove(ItemType* pItem)
2658 {
2659  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2660  VMA_HEAVY_ASSERT(m_Count > 0);
2661 
2662  if(pItem->pPrev != VMA_NULL)
2663  {
2664  pItem->pPrev->pNext = pItem->pNext;
2665  }
2666  else
2667  {
2668  VMA_HEAVY_ASSERT(m_pFront == pItem);
2669  m_pFront = pItem->pNext;
2670  }
2671 
2672  if(pItem->pNext != VMA_NULL)
2673  {
2674  pItem->pNext->pPrev = pItem->pPrev;
2675  }
2676  else
2677  {
2678  VMA_HEAVY_ASSERT(m_pBack == pItem);
2679  m_pBack = pItem->pPrev;
2680  }
2681 
2682  m_ItemAllocator.Free(pItem);
2683  --m_Count;
2684 }
2685 
2686 template<typename T>
2687 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2688 {
2689  if(pItem != VMA_NULL)
2690  {
2691  ItemType* const prevItem = pItem->pPrev;
2692  ItemType* const newItem = m_ItemAllocator.Alloc();
2693  newItem->pPrev = prevItem;
2694  newItem->pNext = pItem;
2695  pItem->pPrev = newItem;
2696  if(prevItem != VMA_NULL)
2697  {
2698  prevItem->pNext = newItem;
2699  }
2700  else
2701  {
2702  VMA_HEAVY_ASSERT(m_pFront == pItem);
2703  m_pFront = newItem;
2704  }
2705  ++m_Count;
2706  return newItem;
2707  }
2708  else
2709  return PushBack();
2710 }
2711 
2712 template<typename T>
2713 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2714 {
2715  if(pItem != VMA_NULL)
2716  {
2717  ItemType* const nextItem = pItem->pNext;
2718  ItemType* const newItem = m_ItemAllocator.Alloc();
2719  newItem->pNext = nextItem;
2720  newItem->pPrev = pItem;
2721  pItem->pNext = newItem;
2722  if(nextItem != VMA_NULL)
2723  {
2724  nextItem->pPrev = newItem;
2725  }
2726  else
2727  {
2728  VMA_HEAVY_ASSERT(m_pBack == pItem);
2729  m_pBack = newItem;
2730  }
2731  ++m_Count;
2732  return newItem;
2733  }
2734  else
2735  return PushFront();
2736 }
2737 
2738 template<typename T>
2739 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2740 {
2741  ItemType* const newItem = InsertBefore(pItem);
2742  newItem->Value = value;
2743  return newItem;
2744 }
2745 
2746 template<typename T>
2747 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2748 {
2749  ItemType* const newItem = InsertAfter(pItem);
2750  newItem->Value = value;
2751  return newItem;
2752 }
2753 
2754 template<typename T, typename AllocatorT>
2755 class VmaList
2756 {
2757 public:
2758  class iterator
2759  {
2760  public:
2761  iterator() :
2762  m_pList(VMA_NULL),
2763  m_pItem(VMA_NULL)
2764  {
2765  }
2766 
2767  T& operator*() const
2768  {
2769  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2770  return m_pItem->Value;
2771  }
2772  T* operator->() const
2773  {
2774  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2775  return &m_pItem->Value;
2776  }
2777 
2778  iterator& operator++()
2779  {
2780  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2781  m_pItem = m_pItem->pNext;
2782  return *this;
2783  }
2784  iterator& operator--()
2785  {
2786  if(m_pItem != VMA_NULL)
2787  {
2788  m_pItem = m_pItem->pPrev;
2789  }
2790  else
2791  {
2792  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2793  m_pItem = m_pList->Back();
2794  }
2795  return *this;
2796  }
2797 
2798  iterator operator++(int)
2799  {
2800  iterator result = *this;
2801  ++*this;
2802  return result;
2803  }
2804  iterator operator--(int)
2805  {
2806  iterator result = *this;
2807  --*this;
2808  return result;
2809  }
2810 
2811  bool operator==(const iterator& rhs) const
2812  {
2813  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2814  return m_pItem == rhs.m_pItem;
2815  }
2816  bool operator!=(const iterator& rhs) const
2817  {
2818  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2819  return m_pItem != rhs.m_pItem;
2820  }
2821 
2822  private:
2823  VmaRawList<T>* m_pList;
2824  VmaListItem<T>* m_pItem;
2825 
2826  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2827  m_pList(pList),
2828  m_pItem(pItem)
2829  {
2830  }
2831 
2832  friend class VmaList<T, AllocatorT>;
2833  };
2834 
2835  class const_iterator
2836  {
2837  public:
2838  const_iterator() :
2839  m_pList(VMA_NULL),
2840  m_pItem(VMA_NULL)
2841  {
2842  }
2843 
2844  const_iterator(const iterator& src) :
2845  m_pList(src.m_pList),
2846  m_pItem(src.m_pItem)
2847  {
2848  }
2849 
2850  const T& operator*() const
2851  {
2852  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2853  return m_pItem->Value;
2854  }
2855  const T* operator->() const
2856  {
2857  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2858  return &m_pItem->Value;
2859  }
2860 
2861  const_iterator& operator++()
2862  {
2863  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2864  m_pItem = m_pItem->pNext;
2865  return *this;
2866  }
2867  const_iterator& operator--()
2868  {
2869  if(m_pItem != VMA_NULL)
2870  {
2871  m_pItem = m_pItem->pPrev;
2872  }
2873  else
2874  {
2875  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2876  m_pItem = m_pList->Back();
2877  }
2878  return *this;
2879  }
2880 
2881  const_iterator operator++(int)
2882  {
2883  const_iterator result = *this;
2884  ++*this;
2885  return result;
2886  }
2887  const_iterator operator--(int)
2888  {
2889  const_iterator result = *this;
2890  --*this;
2891  return result;
2892  }
2893 
2894  bool operator==(const const_iterator& rhs) const
2895  {
2896  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2897  return m_pItem == rhs.m_pItem;
2898  }
2899  bool operator!=(const const_iterator& rhs) const
2900  {
2901  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2902  return m_pItem != rhs.m_pItem;
2903  }
2904 
2905  private:
2906  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2907  m_pList(pList),
2908  m_pItem(pItem)
2909  {
2910  }
2911 
2912  const VmaRawList<T>* m_pList;
2913  const VmaListItem<T>* m_pItem;
2914 
2915  friend class VmaList<T, AllocatorT>;
2916  };
2917 
2918  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2919 
2920  bool empty() const { return m_RawList.IsEmpty(); }
2921  size_t size() const { return m_RawList.GetCount(); }
2922 
2923  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2924  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2925 
2926  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2927  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2928 
2929  void clear() { m_RawList.Clear(); }
2930  void push_back(const T& value) { m_RawList.PushBack(value); }
2931  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2932  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2933 
2934 private:
2935  VmaRawList<T> m_RawList;
2936 };
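/*
Usage sketch (pAllocationCallbacks stands for the caller's callbacks):

    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    IntList list(VmaStlAllocator<int>(pAllocationCallbacks));
    list.push_back(42);
    for(IntList::iterator it = list.begin(); it != list.end(); ++it)
    {
        *it += 1;
    }
*/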
2937 
2938 #endif // #if VMA_USE_STL_LIST
2939 
2941 // class VmaMap
2942 
2943 // Unused in this version.
2944 #if 0
2945 
2946 #if VMA_USE_STL_UNORDERED_MAP
2947 
2948 #define VmaPair std::pair
2949 
2950 #define VMA_MAP_TYPE(KeyT, ValueT) \
2951  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2952 
2953 #else // #if VMA_USE_STL_UNORDERED_MAP
2954 
2955 template<typename T1, typename T2>
2956 struct VmaPair
2957 {
2958  T1 first;
2959  T2 second;
2960 
2961  VmaPair() : first(), second() { }
2962  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2963 };
2964 
2965 /* Class compatible with a subset of the interface of std::unordered_map.
2966 KeyT and ValueT must be POD, because they will be stored in a VmaVector.
2967 */
2968 template<typename KeyT, typename ValueT>
2969 class VmaMap
2970 {
2971 public:
2972  typedef VmaPair<KeyT, ValueT> PairType;
2973  typedef PairType* iterator;
2974 
2975  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2976 
2977  iterator begin() { return m_Vector.begin(); }
2978  iterator end() { return m_Vector.end(); }
2979 
2980  void insert(const PairType& pair);
2981  iterator find(const KeyT& key);
2982  void erase(iterator it);
2983 
2984 private:
2985  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2986 };
2987 
2988 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2989 
2990 template<typename FirstT, typename SecondT>
2991 struct VmaPairFirstLess
2992 {
2993  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2994  {
2995  return lhs.first < rhs.first;
2996  }
2997  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2998  {
2999  return lhs.first < rhsFirst;
3000  }
3001 };
3002 
3003 template<typename KeyT, typename ValueT>
3004 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3005 {
3006  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3007  m_Vector.data(),
3008  m_Vector.data() + m_Vector.size(),
3009  pair,
3010  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3011  VmaVectorInsert(m_Vector, indexToInsert, pair);
3012 }
3013 
3014 template<typename KeyT, typename ValueT>
3015 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3016 {
3017  PairType* it = VmaBinaryFindFirstNotLess(
3018  m_Vector.data(),
3019  m_Vector.data() + m_Vector.size(),
3020  key,
3021  VmaPairFirstLess<KeyT, ValueT>());
3022  if((it != m_Vector.end()) && (it->first == key))
3023  {
3024  return it;
3025  }
3026  else
3027  {
3028  return m_Vector.end();
3029  }
3030 }
3031 
3032 template<typename KeyT, typename ValueT>
3033 void VmaMap<KeyT, ValueT>::erase(iterator it)
3034 {
3035  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3036 }
3037 
3038 #endif // #if VMA_USE_STL_UNORDERED_MAP
3039 
3040 #endif // #if 0
3041 
3043 
3044 class VmaDeviceMemoryBlock;
3045 
3046 struct VmaAllocation_T
3047 {
3048 private:
3049  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3050 
3051  enum FLAGS
3052  {
3053  FLAG_USER_DATA_STRING = 0x01,
3054  };
3055 
3056 public:
3057  enum ALLOCATION_TYPE
3058  {
3059  ALLOCATION_TYPE_NONE,
3060  ALLOCATION_TYPE_BLOCK,
3061  ALLOCATION_TYPE_DEDICATED,
3062  };
3063 
3064  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3065  m_Alignment(1),
3066  m_Size(0),
3067  m_pUserData(VMA_NULL),
3068  m_LastUseFrameIndex(currentFrameIndex),
3069  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3070  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3071  m_MapCount(0),
3072  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3073  {
3074  }
3075 
3076  ~VmaAllocation_T()
3077  {
3078  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3079 
3080  // Check if owned string was freed.
3081  VMA_ASSERT(m_pUserData == VMA_NULL);
3082  }
3083 
3084  void InitBlockAllocation(
3085  VmaPool hPool,
3086  VmaDeviceMemoryBlock* block,
3087  VkDeviceSize offset,
3088  VkDeviceSize alignment,
3089  VkDeviceSize size,
3090  VmaSuballocationType suballocationType,
3091  bool mapped,
3092  bool canBecomeLost)
3093  {
3094  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3095  VMA_ASSERT(block != VMA_NULL);
3096  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3097  m_Alignment = alignment;
3098  m_Size = size;
3099  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3100  m_SuballocationType = (uint8_t)suballocationType;
3101  m_BlockAllocation.m_hPool = hPool;
3102  m_BlockAllocation.m_Block = block;
3103  m_BlockAllocation.m_Offset = offset;
3104  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3105  }
3106 
3107  void InitLost()
3108  {
3109  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3110  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3111  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3112  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3113  m_BlockAllocation.m_Block = VMA_NULL;
3114  m_BlockAllocation.m_Offset = 0;
3115  m_BlockAllocation.m_CanBecomeLost = true;
3116  }
3117 
3118  void ChangeBlockAllocation(
3119  VmaDeviceMemoryBlock* block,
3120  VkDeviceSize offset)
3121  {
3122  VMA_ASSERT(block != VMA_NULL);
3123  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3124  m_BlockAllocation.m_Block = block;
3125  m_BlockAllocation.m_Offset = offset;
3126  }
3127 
3128  // A non-null pMappedData means the allocation is created with the MAPPED flag.
3129  void InitDedicatedAllocation(
3130  uint32_t memoryTypeIndex,
3131  VkDeviceMemory hMemory,
3132  VmaSuballocationType suballocationType,
3133  void* pMappedData,
3134  VkDeviceSize size)
3135  {
3136  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3137  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3138  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3139  m_Alignment = 0;
3140  m_Size = size;
3141  m_SuballocationType = (uint8_t)suballocationType;
3142  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3143  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3144  m_DedicatedAllocation.m_hMemory = hMemory;
3145  m_DedicatedAllocation.m_pMappedData = pMappedData;
3146  }
3147 
3148  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3149  VkDeviceSize GetAlignment() const { return m_Alignment; }
3150  VkDeviceSize GetSize() const { return m_Size; }
3151  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3152  void* GetUserData() const { return m_pUserData; }
3153  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3154  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3155 
3156  VmaDeviceMemoryBlock* GetBlock() const
3157  {
3158  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3159  return m_BlockAllocation.m_Block;
3160  }
3161  VkDeviceSize GetOffset() const;
3162  VkDeviceMemory GetMemory() const;
3163  uint32_t GetMemoryTypeIndex() const;
3164  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3165  void* GetMappedData() const;
3166  bool CanBecomeLost() const;
3167  VmaPool GetPool() const;
3168 
3169  uint32_t GetLastUseFrameIndex() const
3170  {
3171  return m_LastUseFrameIndex.load();
3172  }
3173  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3174  {
3175  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3176  }
3177  /*
3178  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3179  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3180  - Else, returns false.
3181 
3182  If hAllocation is already lost, assert - you should not call it then.
3183  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3184  */
3185  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3186 
3187  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3188  {
3189  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3190  outInfo.blockCount = 1;
3191  outInfo.allocationCount = 1;
3192  outInfo.unusedRangeCount = 0;
3193  outInfo.usedBytes = m_Size;
3194  outInfo.unusedBytes = 0;
3195  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3196  outInfo.unusedRangeSizeMin = UINT64_MAX;
3197  outInfo.unusedRangeSizeMax = 0;
3198  }
3199 
3200  void BlockAllocMap();
3201  void BlockAllocUnmap();
3202  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3203  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3204 
3205 private:
3206  VkDeviceSize m_Alignment;
3207  VkDeviceSize m_Size;
3208  void* m_pUserData;
3209  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3210  uint8_t m_Type; // ALLOCATION_TYPE
3211  uint8_t m_SuballocationType; // VmaSuballocationType
3212  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3213  // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are a reference counter for vmaMapMemory()/vmaUnmapMemory().
3214  uint8_t m_MapCount;
3215  uint8_t m_Flags; // enum FLAGS
3216 
3217  // Allocation out of VmaDeviceMemoryBlock.
3218  struct BlockAllocation
3219  {
3220  VmaPool m_hPool; // Null if belongs to general memory.
3221  VmaDeviceMemoryBlock* m_Block;
3222  VkDeviceSize m_Offset;
3223  bool m_CanBecomeLost;
3224  };
3225 
3226  // Allocation for an object that has its own private VkDeviceMemory.
3227  struct DedicatedAllocation
3228  {
3229  uint32_t m_MemoryTypeIndex;
3230  VkDeviceMemory m_hMemory;
3231  void* m_pMappedData; // Not null means memory is mapped.
3232  };
3233 
3234  union
3235  {
3236  // Allocation out of VmaDeviceMemoryBlock.
3237  BlockAllocation m_BlockAllocation;
3238  // Allocation for an object that has its own private VkDeviceMemory.
3239  DedicatedAllocation m_DedicatedAllocation;
3240  };
3241 
3242  void FreeUserDataString(VmaAllocator hAllocator);
3243 };
3244 
3245 /*
3246 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
3247 as an allocated memory block, or free.
3248 */
3249 struct VmaSuballocation
3250 {
3251  VkDeviceSize offset;
3252  VkDeviceSize size;
3253  VmaAllocation hAllocation;
3254  VmaSuballocationType type;
3255 };
3256 
3257 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3258 
3259 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3260 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3261 
3262 /*
3263 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3264 
3265 If canMakeOtherLost was false:
3266 - item points to a FREE suballocation.
3267 - itemsToMakeLostCount is 0.
3268 
3269 If canMakeOtherLost was true:
3270 - item points to first of sequence of suballocations, which are either FREE,
3271  or point to VmaAllocations that can become lost.
3272 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3273  the requested allocation to succeed.
3274 */
3275 struct VmaAllocationRequest
3276 {
3277  VkDeviceSize offset;
3278  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3279  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3280  VmaSuballocationList::iterator item;
3281  size_t itemsToMakeLostCount;
3282 
3283  VkDeviceSize CalcCost() const
3284  {
3285  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3286  }
3287 };
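/*
For example, a request that overlaps two lost-able allocations of 256 bytes each
has CalcCost() == 512 + 2 * VMA_LOST_ALLOCATION_COST, so, when comparing candidate
requests, making allocations lost is penalized far more heavily than consuming the
same number of free bytes.
*/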
3288 
3289 /*
3290 Data structure used for bookkeeping of allocations and unused ranges of memory
3291 in a single VkDeviceMemory block.
3292 */
3293 class VmaBlockMetadata
3294 {
3295 public:
3296  VmaBlockMetadata(VmaAllocator hAllocator);
3297  ~VmaBlockMetadata();
3298  void Init(VkDeviceSize size);
3299 
3300  // Validates all data structures inside this object. If not valid, returns false.
3301  bool Validate() const;
3302  VkDeviceSize GetSize() const { return m_Size; }
3303  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3304  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3305  VkDeviceSize GetUnusedRangeSizeMax() const;
3306  // Returns true if this block is empty - contains only a single free suballocation.
3307  bool IsEmpty() const;
3308 
3309  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3310  void AddPoolStats(VmaPoolStats& inoutStats) const;
3311 
3312 #if VMA_STATS_STRING_ENABLED
3313  void PrintDetailedMap(class VmaJsonWriter& json) const;
3314 #endif
3315 
3316  // Creates trivial request for case when block is empty.
3317  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3318 
3319  // Tries to find a place for suballocation with given parameters inside this block.
3320  // If succeeded, fills pAllocationRequest and returns true.
3321  // If failed, returns false.
3322  bool CreateAllocationRequest(
3323  uint32_t currentFrameIndex,
3324  uint32_t frameInUseCount,
3325  VkDeviceSize bufferImageGranularity,
3326  VkDeviceSize allocSize,
3327  VkDeviceSize allocAlignment,
3328  VmaSuballocationType allocType,
3329  bool canMakeOtherLost,
3330  VmaAllocationRequest* pAllocationRequest);
3331 
3332  bool MakeRequestedAllocationsLost(
3333  uint32_t currentFrameIndex,
3334  uint32_t frameInUseCount,
3335  VmaAllocationRequest* pAllocationRequest);
3336 
3337  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3338 
3339  // Makes actual allocation based on request. Request must already be checked and valid.
3340  void Alloc(
3341  const VmaAllocationRequest& request,
3342  VmaSuballocationType type,
3343  VkDeviceSize allocSize,
3344  VmaAllocation hAllocation);
3345 
3346  // Frees suballocation assigned to given memory region.
3347  void Free(const VmaAllocation allocation);
3348 
3349 private:
3350  VkDeviceSize m_Size;
3351  uint32_t m_FreeCount;
3352  VkDeviceSize m_SumFreeSize;
3353  VmaSuballocationList m_Suballocations;
3354  // Suballocations that are free and have size greater than certain threshold.
3355  // Sorted by size, ascending.
3356  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3357 
3358  bool ValidateFreeSuballocationList() const;
3359 
3360  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
3361  // If yes, fills pOffset and returns true. If no, returns false.
3362  bool CheckAllocation(
3363  uint32_t currentFrameIndex,
3364  uint32_t frameInUseCount,
3365  VkDeviceSize bufferImageGranularity,
3366  VkDeviceSize allocSize,
3367  VkDeviceSize allocAlignment,
3368  VmaSuballocationType allocType,
3369  VmaSuballocationList::const_iterator suballocItem,
3370  bool canMakeOtherLost,
3371  VkDeviceSize* pOffset,
3372  size_t* itemsToMakeLostCount,
3373  VkDeviceSize* pSumFreeSize,
3374  VkDeviceSize* pSumItemSize) const;
3375  // Given a free suballocation, merges it with the following one, which must also be free.
3376  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3377  // Releases given suballocation, making it free.
3378  // Merges it with adjacent free suballocations if applicable.
3379  // Returns iterator to new free suballocation at this place.
3380  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3381  // Given a free suballocation, inserts it into the sorted list
3382  // m_FreeSuballocationsBySize, if it is suitable.
3383  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3384  // Given a free suballocation, removes it from the sorted list
3385  // m_FreeSuballocationsBySize, if it is suitable.
3386  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3387 };
3388 
3389 // Helper class that represents mapped memory. Synchronized internally.
3390 class VmaDeviceMemoryMapping
3391 {
3392 public:
3393  VmaDeviceMemoryMapping();
3394  ~VmaDeviceMemoryMapping();
3395 
3396  void* GetMappedData() const { return m_pMappedData; }
3397 
3398  // ppData can be null.
3399  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
3400  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
3401 
3402 private:
3403  VMA_MUTEX m_Mutex;
3404  uint32_t m_MapCount;
3405  void* m_pMappedData;
3406 };
3407 
3408 /*
3409 Represents a single block of device memory (`VkDeviceMemory`) with all the
3410 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3411 
3412 Thread-safety: This class must be externally synchronized.
3413 */
3414 class VmaDeviceMemoryBlock
3415 {
3416 public:
3417  uint32_t m_MemoryTypeIndex;
3418  VkDeviceMemory m_hMemory;
3419  VmaDeviceMemoryMapping m_Mapping;
3420  VmaBlockMetadata m_Metadata;
3421 
3422  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3423 
3424  ~VmaDeviceMemoryBlock()
3425  {
3426  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3427  }
3428 
3429  // Always call after construction.
3430  void Init(
3431  uint32_t newMemoryTypeIndex,
3432  VkDeviceMemory newMemory,
3433  VkDeviceSize newSize);
3434  // Always call before destruction.
3435  void Destroy(VmaAllocator allocator);
3436 
3437  // Validates all data structures inside this object. If not valid, returns false.
3438  bool Validate() const;
3439 
3440  // ppData can be null.
3441  VkResult Map(VmaAllocator hAllocator, void** ppData);
3442  void Unmap(VmaAllocator hAllocator);
3443 };
3444 
3445 struct VmaPointerLess
3446 {
3447  bool operator()(const void* lhs, const void* rhs) const
3448  {
3449  return lhs < rhs;
3450  }
3451 };
3452 
3453 class VmaDefragmentator;
3454 
3455 /*
3456 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3457 Vulkan memory type.
3458 
3459 Synchronized internally with a mutex.
3460 */
3461 struct VmaBlockVector
3462 {
3463  VmaBlockVector(
3464  VmaAllocator hAllocator,
3465  uint32_t memoryTypeIndex,
3466  VkDeviceSize preferredBlockSize,
3467  size_t minBlockCount,
3468  size_t maxBlockCount,
3469  VkDeviceSize bufferImageGranularity,
3470  uint32_t frameInUseCount,
3471  bool isCustomPool);
3472  ~VmaBlockVector();
3473 
3474  VkResult CreateMinBlocks();
3475 
3476  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3477  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3478  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3479  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3480 
3481  void GetPoolStats(VmaPoolStats* pStats);
3482 
3483  bool IsEmpty() const { return m_Blocks.empty(); }
3484 
3485  VkResult Allocate(
3486  VmaPool hCurrentPool,
3487  uint32_t currentFrameIndex,
3488  const VkMemoryRequirements& vkMemReq,
3489  const VmaAllocationCreateInfo& createInfo,
3490  VmaSuballocationType suballocType,
3491  VmaAllocation* pAllocation);
3492 
3493  void Free(
3494  VmaAllocation hAllocation);
3495 
3496  // Adds statistics of this BlockVector to pStats.
3497  void AddStats(VmaStats* pStats);
3498 
3499 #if VMA_STATS_STRING_ENABLED
3500  void PrintDetailedMap(class VmaJsonWriter& json);
3501 #endif
3502 
3503  void MakePoolAllocationsLost(
3504  uint32_t currentFrameIndex,
3505  size_t* pLostAllocationCount);
3506 
3507  VmaDefragmentator* EnsureDefragmentator(
3508  VmaAllocator hAllocator,
3509  uint32_t currentFrameIndex);
3510 
3511  VkResult Defragment(
3512  VmaDefragmentationStats* pDefragmentationStats,
3513  VkDeviceSize& maxBytesToMove,
3514  uint32_t& maxAllocationsToMove);
3515 
3516  void DestroyDefragmentator();
3517 
3518 private:
3519  friend class VmaDefragmentator;
3520 
3521  const VmaAllocator m_hAllocator;
3522  const uint32_t m_MemoryTypeIndex;
3523  const VkDeviceSize m_PreferredBlockSize;
3524  const size_t m_MinBlockCount;
3525  const size_t m_MaxBlockCount;
3526  const VkDeviceSize m_BufferImageGranularity;
3527  const uint32_t m_FrameInUseCount;
3528  const bool m_IsCustomPool;
3529  VMA_MUTEX m_Mutex;
3530  // Incrementally sorted by sumFreeSize, ascending.
3531  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3532  /* There can be at most one block that is completely empty - a
3533  hysteresis to avoid the pessimistic case of alternating creation and
3534  destruction of a VkDeviceMemory. */
3535  bool m_HasEmptyBlock;
3536  VmaDefragmentator* m_pDefragmentator;
3537 
3538  // Finds and removes given block from vector.
3539  void Remove(VmaDeviceMemoryBlock* pBlock);
3540 
3541  // Performs single step in sorting m_Blocks. They may not be fully sorted
3542  // after this call.
3543  void IncrementallySortBlocks();
3544 
3545  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3546 };
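/*
A hypothetical allocation round-trip through VmaBlockVector (illustration only;
vkMemReq and createInfo are assumed to be filled in by the caller):

\code
VmaAllocation hAlloc = VK_NULL_HANDLE;
VkResult res = blockVector.Allocate(
    VK_NULL_HANDLE,    // hCurrentPool - not a custom pool in this sketch.
    currentFrameIndex,
    vkMemReq,
    createInfo,
    VMA_SUBALLOCATION_TYPE_BUFFER,
    &hAlloc);
if(res == VK_SUCCESS)
{
    // ... bind and use the memory ...
    blockVector.Free(hAlloc);
}
\endcode
*/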
3547 
3548 struct VmaPool_T
3549 {
3550 public:
3551  VmaBlockVector m_BlockVector;
3552 
3553  // Takes ownership.
3554  VmaPool_T(
3555  VmaAllocator hAllocator,
3556  const VmaPoolCreateInfo& createInfo);
3557  ~VmaPool_T();
3558 
3559  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3560 
3561 #if VMA_STATS_STRING_ENABLED
3562  //void PrintDetailedMap(class VmaStringBuilder& sb);
3563 #endif
3564 };
3565 
3566 class VmaDefragmentator
3567 {
3568  const VmaAllocator m_hAllocator;
3569  VmaBlockVector* const m_pBlockVector;
3570  uint32_t m_CurrentFrameIndex;
3571  VkDeviceSize m_BytesMoved;
3572  uint32_t m_AllocationsMoved;
3573 
3574  struct AllocationInfo
3575  {
3576  VmaAllocation m_hAllocation;
3577  VkBool32* m_pChanged;
3578 
3579  AllocationInfo() :
3580  m_hAllocation(VK_NULL_HANDLE),
3581  m_pChanged(VMA_NULL)
3582  {
3583  }
3584  };
3585 
3586  struct AllocationInfoSizeGreater
3587  {
3588  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3589  {
3590  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3591  }
3592  };
3593 
3594  // Used between AddAllocation and Defragment.
3595  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3596 
3597  struct BlockInfo
3598  {
3599  VmaDeviceMemoryBlock* m_pBlock;
3600  bool m_HasNonMovableAllocations;
3601  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3602 
3603  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3604  m_pBlock(VMA_NULL),
3605  m_HasNonMovableAllocations(true),
3606  m_Allocations(pAllocationCallbacks),
3607  m_pMappedDataForDefragmentation(VMA_NULL)
3608  {
3609  }
3610 
3611  void CalcHasNonMovableAllocations()
3612  {
3613  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3614  const size_t defragmentAllocCount = m_Allocations.size();
3615  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3616  }
3617 
3618  void SortAllocationsBySizeDescecnding()
3619  {
3620  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3621  }
3622 
3623  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3624  void Unmap(VmaAllocator hAllocator);
3625 
3626  private:
3627  // Not null if mapped for defragmentation only, not originally mapped.
3628  void* m_pMappedDataForDefragmentation;
3629  };
3630 
3631  struct BlockPointerLess
3632  {
3633  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3634  {
3635  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3636  }
3637  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3638  {
3639  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3640  }
3641  };
3642 
3643  // 1. Blocks with some non-movable allocations go first.
3644  // 2. Blocks with smaller sumFreeSize go first.
3645  struct BlockInfoCompareMoveDestination
3646  {
3647  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3648  {
3649  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3650  {
3651  return true;
3652  }
3653  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3654  {
3655  return false;
3656  }
3657  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3658  {
3659  return true;
3660  }
3661  return false;
3662  }
3663  };
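/*
Presumably this comparator is applied with VMA_SORT when ordering candidate
defragmentation destinations (illustration only): blocks pinned by non-movable
allocations sort first, and among the rest, fuller blocks (smaller sumFreeSize)
are preferred, so movable data is packed into the most occupied memory.

\code
VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
\endcode
*/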
3664 
3665  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3666  BlockInfoVector m_Blocks;
3667 
3668  VkResult DefragmentRound(
3669  VkDeviceSize maxBytesToMove,
3670  uint32_t maxAllocationsToMove);
3671 
3672  static bool MoveMakesSense(
3673  size_t dstBlockIndex, VkDeviceSize dstOffset,
3674  size_t srcBlockIndex, VkDeviceSize srcOffset);
3675 
3676 public:
3677  VmaDefragmentator(
3678  VmaAllocator hAllocator,
3679  VmaBlockVector* pBlockVector,
3680  uint32_t currentFrameIndex);
3681 
3682  ~VmaDefragmentator();
3683 
3684  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3685  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3686 
3687  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3688 
3689  VkResult Defragment(
3690  VkDeviceSize maxBytesToMove,
3691  uint32_t maxAllocationsToMove);
3692 };
3693 
3694 // Main allocator object.
3695 struct VmaAllocator_T
3696 {
3697  bool m_UseMutex;
3698  bool m_UseKhrDedicatedAllocation;
3699  VkDevice m_hDevice;
3700  bool m_AllocationCallbacksSpecified;
3701  VkAllocationCallbacks m_AllocationCallbacks;
3702  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3703 
3704  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3705  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
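 // E.g. with a 256 MiB limit on a heap and 64 MiB currently allocated from it,
 // the corresponding entry holds 192 MiB.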
3706  VMA_MUTEX m_HeapSizeLimitMutex;
3707 
3708  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3709  VkPhysicalDeviceMemoryProperties m_MemProps;
3710 
3711  // Default pools.
3712  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3713 
3714  // Each vector is sorted by memory (handle value).
3715  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3716  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
3717  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3718 
3719  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3720  ~VmaAllocator_T();
3721 
3722  const VkAllocationCallbacks* GetAllocationCallbacks() const
3723  {
3724  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3725  }
3726  const VmaVulkanFunctions& GetVulkanFunctions() const
3727  {
3728  return m_VulkanFunctions;
3729  }
3730 
3731  VkDeviceSize GetBufferImageGranularity() const
3732  {
3733  return VMA_MAX(
3734  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3735  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3736  }
3737 
3738  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3739  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3740 
3741  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3742  {
3743  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3744  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3745  }
3746 
3747  void GetBufferMemoryRequirements(
3748  VkBuffer hBuffer,
3749  VkMemoryRequirements& memReq,
3750  bool& requiresDedicatedAllocation,
3751  bool& prefersDedicatedAllocation) const;
3752  void GetImageMemoryRequirements(
3753  VkImage hImage,
3754  VkMemoryRequirements& memReq,
3755  bool& requiresDedicatedAllocation,
3756  bool& prefersDedicatedAllocation) const;
3757 
3758  // Main allocation function.
3759  VkResult AllocateMemory(
3760  const VkMemoryRequirements& vkMemReq,
3761  bool requiresDedicatedAllocation,
3762  bool prefersDedicatedAllocation,
3763  VkBuffer dedicatedBuffer,
3764  VkImage dedicatedImage,
3765  const VmaAllocationCreateInfo& createInfo,
3766  VmaSuballocationType suballocType,
3767  VmaAllocation* pAllocation);
3768 
3769  // Main deallocation function.
3770  void FreeMemory(const VmaAllocation allocation);
3771 
3772  void CalculateStats(VmaStats* pStats);
3773 
3774 #if VMA_STATS_STRING_ENABLED
3775  void PrintDetailedMap(class VmaJsonWriter& json);
3776 #endif
3777 
3778  VkResult Defragment(
3779  VmaAllocation* pAllocations,
3780  size_t allocationCount,
3781  VkBool32* pAllocationsChanged,
3782  const VmaDefragmentationInfo* pDefragmentationInfo,
3783  VmaDefragmentationStats* pDefragmentationStats);
3784 
3785  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3786 
3787  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3788  void DestroyPool(VmaPool pool);
3789  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3790 
3791  void SetCurrentFrameIndex(uint32_t frameIndex);
3792 
3793  void MakePoolAllocationsLost(
3794  VmaPool hPool,
3795  size_t* pLostAllocationCount);
3796 
3797  void CreateLostAllocation(VmaAllocation* pAllocation);
3798 
3799  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3800  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3801 
3802  VkResult Map(VmaAllocation hAllocation, void** ppData);
3803  void Unmap(VmaAllocation hAllocation);
3804 
3805 private:
3806  VkDeviceSize m_PreferredLargeHeapBlockSize;
3807  VkDeviceSize m_PreferredSmallHeapBlockSize;
3808 
3809  VkPhysicalDevice m_PhysicalDevice;
3810  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3811 
3812  VMA_MUTEX m_PoolsMutex;
3813  // Protected by m_PoolsMutex. Sorted by pointer value.
3814  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3815 
3816  VmaVulkanFunctions m_VulkanFunctions;
3817 
3818  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3819 
3820  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3821 
3822  VkResult AllocateMemoryOfType(
3823  const VkMemoryRequirements& vkMemReq,
3824  bool dedicatedAllocation,
3825  VkBuffer dedicatedBuffer,
3826  VkImage dedicatedImage,
3827  const VmaAllocationCreateInfo& createInfo,
3828  uint32_t memTypeIndex,
3829  VmaSuballocationType suballocType,
3830  VmaAllocation* pAllocation);
3831 
3832  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
3833  VkResult AllocateDedicatedMemory(
3834  VkDeviceSize size,
3835  VmaSuballocationType suballocType,
3836  uint32_t memTypeIndex,
3837  bool map,
3838  bool isUserDataString,
3839  void* pUserData,
3840  VkBuffer dedicatedBuffer,
3841  VkImage dedicatedImage,
3842  VmaAllocation* pAllocation);
3843 
3844  // Frees the given allocation, which must be a dedicated allocation, and unregisters it.
3845  void FreeDedicatedMemory(VmaAllocation allocation);
3846 };
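/*
A hypothetical sketch of the main allocation path as a caller such as
vmaCreateBuffer might drive it (illustration only; hBuffer and createInfo are
assumed to exist):

\code
VkMemoryRequirements memReq = {};
bool requiresDedicated = false;
bool prefersDedicated = false;
hAllocator->GetBufferMemoryRequirements(hBuffer, memReq, requiresDedicated, prefersDedicated);

VmaAllocation hAlloc = VK_NULL_HANDLE;
VkResult res = hAllocator->AllocateMemory(
    memReq,
    requiresDedicated,
    prefersDedicated,
    hBuffer,           // dedicatedBuffer
    VK_NULL_HANDLE,    // dedicatedImage
    createInfo,
    VMA_SUBALLOCATION_TYPE_BUFFER,
    &hAlloc);
if(res == VK_SUCCESS)
{
    hAllocator->FreeMemory(hAlloc);
}
\endcode
*/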
3847 
3848 ////////////////////////////////////////////////////////////////////////////////
3849 // Memory allocation #2 after VmaAllocator_T definition
3850 
3851 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3852 {
3853  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3854 }
3855 
3856 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3857 {
3858  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3859 }
3860 
3861 template<typename T>
3862 static T* VmaAllocate(VmaAllocator hAllocator)
3863 {
3864  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3865 }
3866 
3867 template<typename T>
3868 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3869 {
3870  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3871 }
3872 
3873 template<typename T>
3874 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3875 {
3876  if(ptr != VMA_NULL)
3877  {
3878  ptr->~T();
3879  VmaFree(hAllocator, ptr);
3880  }
3881 }
3882 
3883 template<typename T>
3884 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3885 {
3886  if(ptr != VMA_NULL)
3887  {
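 // Destroy elements in reverse order, mirroring how built-in array elements
 // are destroyed, then release the memory itself.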
3888  for(size_t i = count; i--; )
3889  ptr[i].~T();
3890  VmaFree(hAllocator, ptr);
3891  }
3892 }
3893 
3895 // VmaStringBuilder
3896 
3897 #if VMA_STATS_STRING_ENABLED
3898 
3899 class VmaStringBuilder
3900 {
3901 public:
3902  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3903  size_t GetLength() const { return m_Data.size(); }
3904  const char* GetData() const { return m_Data.data(); }
3905 
3906  void Add(char ch) { m_Data.push_back(ch); }
3907  void Add(const char* pStr);
3908  void AddNewLine() { Add('\n'); }
3909  void AddNumber(uint32_t num);
3910  void AddNumber(uint64_t num);
3911  void AddPointer(const void* ptr);
3912 
3913 private:
3914  VmaVector< char, VmaStlAllocator<char> > m_Data;
3915 };
3916 
3917 void VmaStringBuilder::Add(const char* pStr)
3918 {
3919  const size_t strLen = strlen(pStr);
3920  if(strLen > 0)
3921  {
3922  const size_t oldCount = m_Data.size();
3923  m_Data.resize(oldCount + strLen);
3924  memcpy(m_Data.data() + oldCount, pStr, strLen);
3925  }
3926 }
3927 
3928 void VmaStringBuilder::AddNumber(uint32_t num)
3929 {
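 // Enough for the 10 decimal digits of UINT32_MAX plus a terminating null.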
3930  char buf[11];
3931  VmaUint32ToStr(buf, sizeof(buf), num);
3932  Add(buf);
3933 }
3934 
3935 void VmaStringBuilder::AddNumber(uint64_t num)
3936 {
3937  char buf[21];
3938  VmaUint64ToStr(buf, sizeof(buf), num);
3939  Add(buf);
3940 }
3941 
3942 void VmaStringBuilder::AddPointer(const void* ptr)
3943 {
3944  char buf[21];
3945  VmaPtrToStr(buf, sizeof(buf), ptr);
3946  Add(buf);
3947 }
3948 
3949 #endif // #if VMA_STATS_STRING_ENABLED
3950 
3951 ////////////////////////////////////////////////////////////////////////////////
3952 // VmaJsonWriter
3953 
3954 #if VMA_STATS_STRING_ENABLED
3955 
3956 class VmaJsonWriter
3957 {
3958 public:
3959  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3960  ~VmaJsonWriter();
3961 
3962  void BeginObject(bool singleLine = false);
3963  void EndObject();
3964 
3965  void BeginArray(bool singleLine = false);
3966  void EndArray();
3967 
3968  void WriteString(const char* pStr);
3969  void BeginString(const char* pStr = VMA_NULL);
3970  void ContinueString(const char* pStr);
3971  void ContinueString(uint32_t n);
3972  void ContinueString(uint64_t n);
3973  void ContinueString_Pointer(const void* ptr);
3974  void EndString(const char* pStr = VMA_NULL);
3975 
3976  void WriteNumber(uint32_t n);
3977  void WriteNumber(uint64_t n);
3978  void WriteBool(bool b);
3979  void WriteNull();
3980 
3981 private:
3982  static const char* const INDENT;
3983 
3984  enum COLLECTION_TYPE
3985  {
3986  COLLECTION_TYPE_OBJECT,
3987  COLLECTION_TYPE_ARRAY,
3988  };
3989  struct StackItem
3990  {
3991  COLLECTION_TYPE type;
3992  uint32_t valueCount;
3993  bool singleLineMode;
3994  };
3995 
3996  VmaStringBuilder& m_SB;
3997  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3998  bool m_InsideString;
3999 
4000  void BeginValue(bool isString);
4001  void WriteIndent(bool oneLess = false);
4002 };
4003 
4004 const char* const VmaJsonWriter::INDENT = " ";
4005 
4006 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4007  m_SB(sb),
4008  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4009  m_InsideString(false)
4010 {
4011 }
4012 
4013 VmaJsonWriter::~VmaJsonWriter()
4014 {
4015  VMA_ASSERT(!m_InsideString);
4016  VMA_ASSERT(m_Stack.empty());
4017 }
4018 
4019 void VmaJsonWriter::BeginObject(bool singleLine)
4020 {
4021  VMA_ASSERT(!m_InsideString);
4022 
4023  BeginValue(false);
4024  m_SB.Add('{');
4025 
4026  StackItem item;
4027  item.type = COLLECTION_TYPE_OBJECT;
4028  item.valueCount = 0;
4029  item.singleLineMode = singleLine;
4030  m_Stack.push_back(item);
4031 }
4032 
4033 void VmaJsonWriter::EndObject()
4034 {
4035  VMA_ASSERT(!m_InsideString);
4036 
4037  WriteIndent(true);
4038  m_SB.Add('}');
4039 
4040  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4041  m_Stack.pop_back();
4042 }
4043 
4044 void VmaJsonWriter::BeginArray(bool singleLine)
4045 {
4046  VMA_ASSERT(!m_InsideString);
4047 
4048  BeginValue(false);
4049  m_SB.Add('[');
4050 
4051  StackItem item;
4052  item.type = COLLECTION_TYPE_ARRAY;
4053  item.valueCount = 0;
4054  item.singleLineMode = singleLine;
4055  m_Stack.push_back(item);
4056 }
4057 
4058 void VmaJsonWriter::EndArray()
4059 {
4060  VMA_ASSERT(!m_InsideString);
4061 
4062  WriteIndent(true);
4063  m_SB.Add(']');
4064 
4065  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4066  m_Stack.pop_back();
4067 }
4068 
4069 void VmaJsonWriter::WriteString(const char* pStr)
4070 {
4071  BeginString(pStr);
4072  EndString();
4073 }
4074 
4075 void VmaJsonWriter::BeginString(const char* pStr)
4076 {
4077  VMA_ASSERT(!m_InsideString);
4078 
4079  BeginValue(true);
4080  m_SB.Add('"');
4081  m_InsideString = true;
4082  if(pStr != VMA_NULL && pStr[0] != '\0')
4083  {
4084  ContinueString(pStr);
4085  }
4086 }
4087 
4088 void VmaJsonWriter::ContinueString(const char* pStr)
4089 {
4090  VMA_ASSERT(m_InsideString);
4091 
4092  const size_t strLen = strlen(pStr);
4093  for(size_t i = 0; i < strLen; ++i)
4094  {
4095  char ch = pStr[i];
4096  if(ch == '\\')
4097  {
4098  m_SB.Add("\\\\");
4099  }
4100  else if(ch == '"')
4101  {
4102  m_SB.Add("\\\"");
4103  }
4104  else if(ch >= 32)
4105  {
4106  m_SB.Add(ch);
4107  }
4108  else switch(ch)
4109  {
4110  case '\b':
4111  m_SB.Add("\\b");
4112  break;
4113  case '\f':
4114  m_SB.Add("\\f");
4115  break;
4116  case '\n':
4117  m_SB.Add("\\n");
4118  break;
4119  case '\r':
4120  m_SB.Add("\\r");
4121  break;
4122  case '\t':
4123  m_SB.Add("\\t");
4124  break;
4125  default:
4126  VMA_ASSERT(0 && "Character not currently supported.");
4127  break;
4128  }
4129  }
4130 }
4131 
4132 void VmaJsonWriter::ContinueString(uint32_t n)
4133 {
4134  VMA_ASSERT(m_InsideString);
4135  m_SB.AddNumber(n);
4136 }
4137 
4138 void VmaJsonWriter::ContinueString(uint64_t n)
4139 {
4140  VMA_ASSERT(m_InsideString);
4141  m_SB.AddNumber(n);
4142 }
4143 
4144 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4145 {
4146  VMA_ASSERT(m_InsideString);
4147  m_SB.AddPointer(ptr);
4148 }
4149 
4150 void VmaJsonWriter::EndString(const char* pStr)
4151 {
4152  VMA_ASSERT(m_InsideString);
4153  if(pStr != VMA_NULL && pStr[0] != '\0')
4154  {
4155  ContinueString(pStr);
4156  }
4157  m_SB.Add('"');
4158  m_InsideString = false;
4159 }
4160 
4161 void VmaJsonWriter::WriteNumber(uint32_t n)
4162 {
4163  VMA_ASSERT(!m_InsideString);
4164  BeginValue(false);
4165  m_SB.AddNumber(n);
4166 }
4167 
4168 void VmaJsonWriter::WriteNumber(uint64_t n)
4169 {
4170  VMA_ASSERT(!m_InsideString);
4171  BeginValue(false);
4172  m_SB.AddNumber(n);
4173 }
4174 
4175 void VmaJsonWriter::WriteBool(bool b)
4176 {
4177  VMA_ASSERT(!m_InsideString);
4178  BeginValue(false);
4179  m_SB.Add(b ? "true" : "false");
4180 }
4181 
4182 void VmaJsonWriter::WriteNull()
4183 {
4184  VMA_ASSERT(!m_InsideString);
4185  BeginValue(false);
4186  m_SB.Add("null");
4187 }
4188 
4189 void VmaJsonWriter::BeginValue(bool isString)
4190 {
4191  if(!m_Stack.empty())
4192  {
4193  StackItem& currItem = m_Stack.back();
4194  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4195  currItem.valueCount % 2 == 0)
4196  {
4197  VMA_ASSERT(isString);
4198  }
4199 
4200  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4201  currItem.valueCount % 2 != 0)
4202  {
4203  m_SB.Add(": ");
4204  }
4205  else if(currItem.valueCount > 0)
4206  {
4207  m_SB.Add(", ");
4208  WriteIndent();
4209  }
4210  else
4211  {
4212  WriteIndent();
4213  }
4214  ++currItem.valueCount;
4215  }
4216 }
4217 
4218 void VmaJsonWriter::WriteIndent(bool oneLess)
4219 {
4220  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4221  {
4222  m_SB.AddNewLine();
4223 
4224  size_t count = m_Stack.size();
4225  if(count > 0 && oneLess)
4226  {
4227  --count;
4228  }
4229  for(size_t i = 0; i < count; ++i)
4230  {
4231  m_SB.Add(INDENT);
4232  }
4233  }
4234 }
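/*
A hypothetical sketch of the writer's key/value discipline (illustration only).
Inside an object, every even-positioned value must be a string key - BeginValue()
asserts this - and the ": " and ", " separators are emitted automatically:

\code
VmaStringBuilder sb(hAllocator);
{
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Blocks"); // Key.
    json.WriteNumber(42u);      // Value.
    json.EndObject();
}
// sb now holds { "Blocks": 42 } pretty-printed across three lines.
\endcode
*/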
4235 
4236 #endif // #if VMA_STATS_STRING_ENABLED
4237 
4238 ////////////////////////////////////////////////////////////////////////////////
4239 
4240 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4241 {
4242  if(IsUserDataString())
4243  {
4244  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4245 
4246  FreeUserDataString(hAllocator);
4247 
4248  if(pUserData != VMA_NULL)
4249  {
4250  const char* const newStrSrc = (char*)pUserData;
4251  const size_t newStrLen = strlen(newStrSrc);
4252  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4253  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4254  m_pUserData = newStrDst;
4255  }
4256  }
4257  else
4258  {
4259  m_pUserData = pUserData;
4260  }
4261 }
4262 
4263 VkDeviceSize VmaAllocation_T::GetOffset() const
4264 {
4265  switch(m_Type)
4266  {
4267  case ALLOCATION_TYPE_BLOCK:
4268  return m_BlockAllocation.m_Offset;
4269  case ALLOCATION_TYPE_DEDICATED:
4270  return 0;
4271  default:
4272  VMA_ASSERT(0);
4273  return 0;
4274  }
4275 }
4276 
4277 VkDeviceMemory VmaAllocation_T::GetMemory() const
4278 {
4279  switch(m_Type)
4280  {
4281  case ALLOCATION_TYPE_BLOCK:
4282  return m_BlockAllocation.m_Block->m_hMemory;
4283  case ALLOCATION_TYPE_DEDICATED:
4284  return m_DedicatedAllocation.m_hMemory;
4285  default:
4286  VMA_ASSERT(0);
4287  return VK_NULL_HANDLE;
4288  }
4289 }
4290 
4291 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4292 {
4293  switch(m_Type)
4294  {
4295  case ALLOCATION_TYPE_BLOCK:
4296  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4297  case ALLOCATION_TYPE_DEDICATED:
4298  return m_DedicatedAllocation.m_MemoryTypeIndex;
4299  default:
4300  VMA_ASSERT(0);
4301  return UINT32_MAX;
4302  }
4303 }
4304 
4305 void* VmaAllocation_T::GetMappedData() const
4306 {
4307  switch(m_Type)
4308  {
4309  case ALLOCATION_TYPE_BLOCK:
4310  if(m_MapCount != 0)
4311  {
4312  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4313  VMA_ASSERT(pBlockData != VMA_NULL);
4314  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4315  }
4316  else
4317  {
4318  return VMA_NULL;
4319  }
4320  break;
4321  case ALLOCATION_TYPE_DEDICATED:
4322  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4323  return m_DedicatedAllocation.m_pMappedData;
4324  default:
4325  VMA_ASSERT(0);
4326  return VMA_NULL;
4327  }
4328 }
4329 
4330 bool VmaAllocation_T::CanBecomeLost() const
4331 {
4332  switch(m_Type)
4333  {
4334  case ALLOCATION_TYPE_BLOCK:
4335  return m_BlockAllocation.m_CanBecomeLost;
4336  case ALLOCATION_TYPE_DEDICATED:
4337  return false;
4338  default:
4339  VMA_ASSERT(0);
4340  return false;
4341  }
4342 }
4343 
4344 VmaPool VmaAllocation_T::GetPool() const
4345 {
4346  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4347  return m_BlockAllocation.m_hPool;
4348 }
4349 
4350 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4351 {
4352  VMA_ASSERT(CanBecomeLost());
4353 
4354  /*
4355  Warning: This is a carefully designed algorithm.
4356  Do not modify unless you really know what you're doing :)
4357  */
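 // A compare-exchange retry loop: bail out if the allocation is already lost
 // or was used within the last frameInUseCount frames; otherwise attempt to
 // atomically flip the last-use index to VMA_FRAME_INDEX_LOST, looping again
 // if another thread modified the index in the meantime.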
4358  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4359  for(;;)
4360  {
4361  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4362  {
4363  VMA_ASSERT(0);
4364  return false;
4365  }
4366  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4367  {
4368  return false;
4369  }
4370  else // Last use time earlier than current time.
4371  {
4372  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4373  {
4374  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4375  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4376  return true;
4377  }
4378  }
4379  }
4380 }
4381 
4382 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4383 {
4384  VMA_ASSERT(IsUserDataString());
4385  if(m_pUserData != VMA_NULL)
4386  {
4387  char* const oldStr = (char*)m_pUserData;
4388  const size_t oldStrLen = strlen(oldStr);
4389  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4390  m_pUserData = VMA_NULL;
4391  }
4392 }
4393 
4394 void VmaAllocation_T::BlockAllocMap()
4395 {
4396  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4397 
4398  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4399  {
4400  ++m_MapCount;
4401  }
4402  else
4403  {
4404  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4405  }
4406 }
4407 
4408 void VmaAllocation_T::BlockAllocUnmap()
4409 {
4410  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4411 
4412  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4413  {
4414  --m_MapCount;
4415  }
4416  else
4417  {
4418  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4419  }
4420 }
4421 
4422 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4423 {
4424  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4425 
4426  if(m_MapCount != 0)
4427  {
4428  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4429  {
4430  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4431  *ppData = m_DedicatedAllocation.m_pMappedData;
4432  ++m_MapCount;
4433  return VK_SUCCESS;
4434  }
4435  else
4436  {
4437  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4438  return VK_ERROR_MEMORY_MAP_FAILED;
4439  }
4440  }
4441  else
4442  {
4443  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4444  hAllocator->m_hDevice,
4445  m_DedicatedAllocation.m_hMemory,
4446  0, // offset
4447  VK_WHOLE_SIZE,
4448  0, // flags
4449  ppData);
4450  if(result == VK_SUCCESS)
4451  {
4452  m_DedicatedAllocation.m_pMappedData = *ppData;
4453  m_MapCount = 1;
4454  }
4455  return result;
4456  }
4457 }
4458 
4459 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4460 {
4461  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4462 
4463  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4464  {
4465  --m_MapCount;
4466  if(m_MapCount == 0)
4467  {
4468  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4469  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4470  hAllocator->m_hDevice,
4471  m_DedicatedAllocation.m_hMemory);
4472  }
4473  }
4474  else
4475  {
4476  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4477  }
4478 }
4479 
4480 #if VMA_STATS_STRING_ENABLED
4481 
4482 // These names correspond to values of enum VmaSuballocationType.
4483 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4484  "FREE",
4485  "UNKNOWN",
4486  "BUFFER",
4487  "IMAGE_UNKNOWN",
4488  "IMAGE_LINEAR",
4489  "IMAGE_OPTIMAL",
4490 };
4491 
4492 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4493 {
4494  json.BeginObject();
4495 
4496  json.WriteString("Blocks");
4497  json.WriteNumber(stat.blockCount);
4498 
4499  json.WriteString("Allocations");
4500  json.WriteNumber(stat.allocationCount);
4501 
4502  json.WriteString("UnusedRanges");
4503  json.WriteNumber(stat.unusedRangeCount);
4504 
4505  json.WriteString("UsedBytes");
4506  json.WriteNumber(stat.usedBytes);
4507 
4508  json.WriteString("UnusedBytes");
4509  json.WriteNumber(stat.unusedBytes);
4510 
4511  if(stat.allocationCount > 1)
4512  {
4513  json.WriteString("AllocationSize");
4514  json.BeginObject(true);
4515  json.WriteString("Min");
4516  json.WriteNumber(stat.allocationSizeMin);
4517  json.WriteString("Avg");
4518  json.WriteNumber(stat.allocationSizeAvg);
4519  json.WriteString("Max");
4520  json.WriteNumber(stat.allocationSizeMax);
4521  json.EndObject();
4522  }
4523 
4524  if(stat.unusedRangeCount > 1)
4525  {
4526  json.WriteString("UnusedRangeSize");
4527  json.BeginObject(true);
4528  json.WriteString("Min");
4529  json.WriteNumber(stat.unusedRangeSizeMin);
4530  json.WriteString("Avg");
4531  json.WriteNumber(stat.unusedRangeSizeAvg);
4532  json.WriteString("Max");
4533  json.WriteNumber(stat.unusedRangeSizeMax);
4534  json.EndObject();
4535  }
4536 
4537  json.EndObject();
4538 }
4539 
4540 #endif // #if VMA_STATS_STRING_ENABLED
4541 
4542 struct VmaSuballocationItemSizeLess
4543 {
4544  bool operator()(
4545  const VmaSuballocationList::iterator lhs,
4546  const VmaSuballocationList::iterator rhs) const
4547  {
4548  return lhs->size < rhs->size;
4549  }
4550  bool operator()(
4551  const VmaSuballocationList::iterator lhs,
4552  VkDeviceSize rhsSize) const
4553  {
4554  return lhs->size < rhsSize;
4555  }
4556 };
4557 
4558 ////////////////////////////////////////////////////////////////////////////////
4559 // class VmaBlockMetadata
4560 
4561 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4562  m_Size(0),
4563  m_FreeCount(0),
4564  m_SumFreeSize(0),
4565  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4566  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4567 {
4568 }
4569 
4570 VmaBlockMetadata::~VmaBlockMetadata()
4571 {
4572 }
4573 
4574 void VmaBlockMetadata::Init(VkDeviceSize size)
4575 {
4576  m_Size = size;
4577  m_FreeCount = 1;
4578  m_SumFreeSize = size;
4579 
4580  VmaSuballocation suballoc = {};
4581  suballoc.offset = 0;
4582  suballoc.size = size;
4583  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4584  suballoc.hAllocation = VK_NULL_HANDLE;
4585 
4586  m_Suballocations.push_back(suballoc);
4587  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4588  --suballocItem;
4589  m_FreeSuballocationsBySize.push_back(suballocItem);
4590 }
4591 
4592 bool VmaBlockMetadata::Validate() const
4593 {
4594  if(m_Suballocations.empty())
4595  {
4596  return false;
4597  }
4598 
4599  // Expected offset of new suballocation as calculated from previous ones.
4600  VkDeviceSize calculatedOffset = 0;
4601  // Expected number of free suballocations as calculated from traversing their list.
4602  uint32_t calculatedFreeCount = 0;
4603  // Expected sum size of free suballocations as calculated from traversing their list.
4604  VkDeviceSize calculatedSumFreeSize = 0;
4605  // Expected number of free suballocations that should be registered in
4606  // m_FreeSuballocationsBySize, as calculated by traversing the list.
4607  size_t freeSuballocationsToRegister = 0;
4608  // True if the previously visited suballocation was free.
4609  bool prevFree = false;
4610 
4611  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4612  suballocItem != m_Suballocations.cend();
4613  ++suballocItem)
4614  {
4615  const VmaSuballocation& subAlloc = *suballocItem;
4616 
4617  // Actual offset of this suballocation doesn't match expected one.
4618  if(subAlloc.offset != calculatedOffset)
4619  {
4620  return false;
4621  }
4622 
4623  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4624  // Two adjacent free suballocations are invalid. They should be merged.
4625  if(prevFree && currFree)
4626  {
4627  return false;
4628  }
4629  prevFree = currFree;
4630 
4631  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4632  {
4633  return false;
4634  }
4635 
4636  if(currFree)
4637  {
4638  calculatedSumFreeSize += subAlloc.size;
4639  ++calculatedFreeCount;
4640  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4641  {
4642  ++freeSuballocationsToRegister;
4643  }
4644  }
4645 
4646  calculatedOffset += subAlloc.size;
4647  }
4648 
4649  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4650  // match expected one.
4651  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4652  {
4653  return false;
4654  }
4655 
4656  VkDeviceSize lastSize = 0;
4657  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4658  {
4659  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4660 
4661  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4662  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4663  {
4664  return false;
4665  }
4666  // They must be sorted by size ascending.
4667  if(suballocItem->size < lastSize)
4668  {
4669  return false;
4670  }
4671 
4672  lastSize = suballocItem->size;
4673  }
4674 
4675  // Check if totals match calculated values.
4676  return
4677  ValidateFreeSuballocationList() &&
4678  (calculatedOffset == m_Size) &&
4679  (calculatedSumFreeSize == m_SumFreeSize) &&
4680  (calculatedFreeCount == m_FreeCount);
4681 }
4682 
4683 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4684 {
4685  if(!m_FreeSuballocationsBySize.empty())
4686  {
4687  return m_FreeSuballocationsBySize.back()->size;
4688  }
4689  else
4690  {
4691  return 0;
4692  }
4693 }
4694 
4695 bool VmaBlockMetadata::IsEmpty() const
4696 {
4697  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4698 }
4699 
4700 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4701 {
4702  outInfo.blockCount = 1;
4703 
4704  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4705  outInfo.allocationCount = rangeCount - m_FreeCount;
4706  outInfo.unusedRangeCount = m_FreeCount;
4707 
4708  outInfo.unusedBytes = m_SumFreeSize;
4709  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4710 
4711  outInfo.allocationSizeMin = UINT64_MAX;
4712  outInfo.allocationSizeMax = 0;
4713  outInfo.unusedRangeSizeMin = UINT64_MAX;
4714  outInfo.unusedRangeSizeMax = 0;
4715 
4716  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4717  suballocItem != m_Suballocations.cend();
4718  ++suballocItem)
4719  {
4720  const VmaSuballocation& suballoc = *suballocItem;
4721  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4722  {
4723  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4724  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4725  }
4726  else
4727  {
4728  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4729  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4730  }
4731  }
4732 }
4733 
4734 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4735 {
4736  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4737 
4738  inoutStats.size += m_Size;
4739  inoutStats.unusedSize += m_SumFreeSize;
4740  inoutStats.allocationCount += rangeCount - m_FreeCount;
4741  inoutStats.unusedRangeCount += m_FreeCount;
4742  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4743 }
4744 
4745 #if VMA_STATS_STRING_ENABLED
4746 
4747 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4748 {
4749  json.BeginObject();
4750 
4751  json.WriteString("TotalBytes");
4752  json.WriteNumber(m_Size);
4753 
4754  json.WriteString("UnusedBytes");
4755  json.WriteNumber(m_SumFreeSize);
4756 
4757  json.WriteString("Allocations");
4758  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4759 
4760  json.WriteString("UnusedRanges");
4761  json.WriteNumber(m_FreeCount);
4762 
4763  json.WriteString("Suballocations");
4764  json.BeginArray();
4765  size_t i = 0;
4766  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4767  suballocItem != m_Suballocations.cend();
4768  ++suballocItem, ++i)
4769  {
4770  json.BeginObject(true);
4771 
4772  json.WriteString("Type");
4773  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4774 
4775  json.WriteString("Size");
4776  json.WriteNumber(suballocItem->size);
4777 
4778  json.WriteString("Offset");
4779  json.WriteNumber(suballocItem->offset);
4780 
4781  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4782  {
4783  const void* pUserData = suballocItem->hAllocation->GetUserData();
4784  if(pUserData != VMA_NULL)
4785  {
4786  json.WriteString("UserData");
4787  if(suballocItem->hAllocation->IsUserDataString())
4788  {
4789  json.WriteString((const char*)pUserData);
4790  }
4791  else
4792  {
4793  json.BeginString();
4794  json.ContinueString_Pointer(pUserData);
4795  json.EndString();
4796  }
4797  }
4798  }
4799 
4800  json.EndObject();
4801  }
4802  json.EndArray();
4803 
4804  json.EndObject();
4805 }
4806 
4807 #endif // #if VMA_STATS_STRING_ENABLED
4808 
4809 /*
4810 How many suitable free suballocations to analyze before choosing the best one.
4811 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4812  be chosen.
4813 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4814  suballocations will be analyzed and the best one will be chosen.
4815 - Any other value is also acceptable.
4816 */
4817 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
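/*
A sketch of why the sorted free list makes Best-Fit cheap here (illustration
only): because m_FreeSuballocationsBySize is kept sorted by size ascending, the
first entry whose size is not less than allocSize is also the tightest fit, so
one binary search covers both strategies:

\code
VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    m_FreeSuballocationsBySize.data(),
    m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    allocSize,
    VmaSuballocationItemSizeLess());
// First-Fit would stop at *it; the scan continues past *it only when a
// candidate fails the other checks (alignment, granularity, lost allocations).
\endcode
*/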
4818 
4819 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4820 {
4821  VMA_ASSERT(IsEmpty());
4822  pAllocationRequest->offset = 0;
4823  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4824  pAllocationRequest->sumItemSize = 0;
4825  pAllocationRequest->item = m_Suballocations.begin();
4826  pAllocationRequest->itemsToMakeLostCount = 0;
4827 }
4828 
4829 bool VmaBlockMetadata::CreateAllocationRequest(
4830  uint32_t currentFrameIndex,
4831  uint32_t frameInUseCount,
4832  VkDeviceSize bufferImageGranularity,
4833  VkDeviceSize allocSize,
4834  VkDeviceSize allocAlignment,
4835  VmaSuballocationType allocType,
4836  bool canMakeOtherLost,
4837  VmaAllocationRequest* pAllocationRequest)
4838 {
4839  VMA_ASSERT(allocSize > 0);
4840  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4841  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4842  VMA_HEAVY_ASSERT(Validate());
4843 
4844  // There is not enough total free space in this block to fulfill the request: Early return.
4845  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4846  {
4847  return false;
4848  }
4849 
4850  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4851  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4852  if(freeSuballocCount > 0)
4853  {
4854  if(VMA_BEST_FIT)
4855  {
4856  // Find first free suballocation with size not less than allocSize.
4857  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4858  m_FreeSuballocationsBySize.data(),
4859  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4860  allocSize,
4861  VmaSuballocationItemSizeLess());
4862  size_t index = it - m_FreeSuballocationsBySize.data();
4863  for(; index < freeSuballocCount; ++index)
4864  {
4865  if(CheckAllocation(
4866  currentFrameIndex,
4867  frameInUseCount,
4868  bufferImageGranularity,
4869  allocSize,
4870  allocAlignment,
4871  allocType,
4872  m_FreeSuballocationsBySize[index],
4873  false, // canMakeOtherLost
4874  &pAllocationRequest->offset,
4875  &pAllocationRequest->itemsToMakeLostCount,
4876  &pAllocationRequest->sumFreeSize,
4877  &pAllocationRequest->sumItemSize))
4878  {
4879  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4880  return true;
4881  }
4882  }
4883  }
4884  else
4885  {
4886  // Search starting from the biggest suballocations.
4887  for(size_t index = freeSuballocCount; index--; )
4888  {
4889  if(CheckAllocation(
4890  currentFrameIndex,
4891  frameInUseCount,
4892  bufferImageGranularity,
4893  allocSize,
4894  allocAlignment,
4895  allocType,
4896  m_FreeSuballocationsBySize[index],
4897  false, // canMakeOtherLost
4898  &pAllocationRequest->offset,
4899  &pAllocationRequest->itemsToMakeLostCount,
4900  &pAllocationRequest->sumFreeSize,
4901  &pAllocationRequest->sumItemSize))
4902  {
4903  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4904  return true;
4905  }
4906  }
4907  }
4908  }
4909 
4910  if(canMakeOtherLost)
4911  {
4912  // Brute-force algorithm. TODO: Come up with something better.
4913 
4914  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4915  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4916 
4917  VmaAllocationRequest tmpAllocRequest = {};
4918  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4919  suballocIt != m_Suballocations.end();
4920  ++suballocIt)
4921  {
4922  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4923  suballocIt->hAllocation->CanBecomeLost())
4924  {
4925  if(CheckAllocation(
4926  currentFrameIndex,
4927  frameInUseCount,
4928  bufferImageGranularity,
4929  allocSize,
4930  allocAlignment,
4931  allocType,
4932  suballocIt,
4933  canMakeOtherLost,
4934  &tmpAllocRequest.offset,
4935  &tmpAllocRequest.itemsToMakeLostCount,
4936  &tmpAllocRequest.sumFreeSize,
4937  &tmpAllocRequest.sumItemSize))
4938  {
4939  tmpAllocRequest.item = suballocIt;
4940 
4941  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4942  {
4943  *pAllocationRequest = tmpAllocRequest;
4944  }
4945  }
4946  }
4947  }
4948 
4949  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4950  {
4951  return true;
4952  }
4953  }
4954 
4955  return false;
4956 }
4957 
4958 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4959  uint32_t currentFrameIndex,
4960  uint32_t frameInUseCount,
4961  VmaAllocationRequest* pAllocationRequest)
4962 {
4963  while(pAllocationRequest->itemsToMakeLostCount > 0)
4964  {
4965  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4966  {
4967  ++pAllocationRequest->item;
4968  }
4969  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4970  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4971  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4972  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4973  {
4974  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4975  --pAllocationRequest->itemsToMakeLostCount;
4976  }
4977  else
4978  {
4979  return false;
4980  }
4981  }
4982 
4983  VMA_HEAVY_ASSERT(Validate());
4984  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4985  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4986 
4987  return true;
4988 }
4989 
4990 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4991 {
4992  uint32_t lostAllocationCount = 0;
4993  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4994  it != m_Suballocations.end();
4995  ++it)
4996  {
4997  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4998  it->hAllocation->CanBecomeLost() &&
4999  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5000  {
5001  it = FreeSuballocation(it);
5002  ++lostAllocationCount;
5003  }
5004  }
5005  return lostAllocationCount;
5006 }
5007 
5008 void VmaBlockMetadata::Alloc(
5009  const VmaAllocationRequest& request,
5010  VmaSuballocationType type,
5011  VkDeviceSize allocSize,
5012  VmaAllocation hAllocation)
5013 {
5014  VMA_ASSERT(request.item != m_Suballocations.end());
5015  VmaSuballocation& suballoc = *request.item;
5016  // Given suballocation is a free block.
5017  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5018  // Given offset is inside this suballocation.
5019  VMA_ASSERT(request.offset >= suballoc.offset);
5020  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5021  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5022  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5023 
5024  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5025  // it to become used.
5026  UnregisterFreeSuballocation(request.item);
5027 
5028  suballoc.offset = request.offset;
5029  suballoc.size = allocSize;
5030  suballoc.type = type;
5031  suballoc.hAllocation = hAllocation;
5032 
5033  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5034  if(paddingEnd)
5035  {
5036  VmaSuballocation paddingSuballoc = {};
5037  paddingSuballoc.offset = request.offset + allocSize;
5038  paddingSuballoc.size = paddingEnd;
5039  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5040  VmaSuballocationList::iterator next = request.item;
5041  ++next;
5042  const VmaSuballocationList::iterator paddingEndItem =
5043  m_Suballocations.insert(next, paddingSuballoc);
5044  RegisterFreeSuballocation(paddingEndItem);
5045  }
5046 
5047  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5048  if(paddingBegin)
5049  {
5050  VmaSuballocation paddingSuballoc = {};
5051  paddingSuballoc.offset = request.offset - paddingBegin;
5052  paddingSuballoc.size = paddingBegin;
5053  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5054  const VmaSuballocationList::iterator paddingBeginItem =
5055  m_Suballocations.insert(request.item, paddingSuballoc);
5056  RegisterFreeSuballocation(paddingBeginItem);
5057  }
5058 
5059  // Update totals.
5060  m_FreeCount = m_FreeCount - 1;
5061  if(paddingBegin > 0)
5062  {
5063  ++m_FreeCount;
5064  }
5065  if(paddingEnd > 0)
5066  {
5067  ++m_FreeCount;
5068  }
5069  m_SumFreeSize -= allocSize;
5070 }
5071 
5072 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5073 {
5074  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5075  suballocItem != m_Suballocations.end();
5076  ++suballocItem)
5077  {
5078  VmaSuballocation& suballoc = *suballocItem;
5079  if(suballoc.hAllocation == allocation)
5080  {
5081  FreeSuballocation(suballocItem);
5082  VMA_HEAVY_ASSERT(Validate());
5083  return;
5084  }
5085  }
5086  VMA_ASSERT(0 && "Not found!");
5087 }
5088 
5089 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5090 {
5091  VkDeviceSize lastSize = 0;
5092  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5093  {
5094  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5095 
5096  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5097  {
5098  VMA_ASSERT(0);
5099  return false;
5100  }
5101  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5102  {
5103  VMA_ASSERT(0);
5104  return false;
5105  }
5106  if(it->size < lastSize)
5107  {
5108  VMA_ASSERT(0);
5109  return false;
5110  }
5111 
5112  lastSize = it->size;
5113  }
5114  return true;
5115 }
5116 
5117 bool VmaBlockMetadata::CheckAllocation(
5118  uint32_t currentFrameIndex,
5119  uint32_t frameInUseCount,
5120  VkDeviceSize bufferImageGranularity,
5121  VkDeviceSize allocSize,
5122  VkDeviceSize allocAlignment,
5123  VmaSuballocationType allocType,
5124  VmaSuballocationList::const_iterator suballocItem,
5125  bool canMakeOtherLost,
5126  VkDeviceSize* pOffset,
5127  size_t* itemsToMakeLostCount,
5128  VkDeviceSize* pSumFreeSize,
5129  VkDeviceSize* pSumItemSize) const
5130 {
5131  VMA_ASSERT(allocSize > 0);
5132  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5133  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5134  VMA_ASSERT(pOffset != VMA_NULL);
5135 
5136  *itemsToMakeLostCount = 0;
5137  *pSumFreeSize = 0;
5138  *pSumItemSize = 0;
5139 
5140  if(canMakeOtherLost)
5141  {
5142  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5143  {
5144  *pSumFreeSize = suballocItem->size;
5145  }
5146  else
5147  {
5148  if(suballocItem->hAllocation->CanBecomeLost() &&
5149  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5150  {
5151  ++*itemsToMakeLostCount;
5152  *pSumItemSize = suballocItem->size;
5153  }
5154  else
5155  {
5156  return false;
5157  }
5158  }
5159 
5160  // Remaining size is too small for this request: Early return.
5161  if(m_Size - suballocItem->offset < allocSize)
5162  {
5163  return false;
5164  }
5165 
5166  // Start from offset equal to beginning of this suballocation.
5167  *pOffset = suballocItem->offset;
5168 
5169  // Apply VMA_DEBUG_MARGIN at the beginning.
5170  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5171  {
5172  *pOffset += VMA_DEBUG_MARGIN;
5173  }
5174 
5175  // Apply alignment.
5176  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5177  *pOffset = VmaAlignUp(*pOffset, alignment);
5178 
5179  // Check previous suballocations for BufferImageGranularity conflicts.
5180  // Make bigger alignment if necessary.
5181  if(bufferImageGranularity > 1)
5182  {
5183  bool bufferImageGranularityConflict = false;
5184  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5185  while(prevSuballocItem != m_Suballocations.cbegin())
5186  {
5187  --prevSuballocItem;
5188  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5189  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5190  {
5191  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5192  {
5193  bufferImageGranularityConflict = true;
5194  break;
5195  }
5196  }
5197  else
5198  // Already on previous page.
5199  break;
5200  }
5201  if(bufferImageGranularityConflict)
5202  {
5203  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5204  }
5205  }
5206 
5207  // Now that we have final *pOffset, check if we are past suballocItem.
5208  // If yes, return false - this function should be called for another suballocItem as starting point.
5209  if(*pOffset >= suballocItem->offset + suballocItem->size)
5210  {
5211  return false;
5212  }
5213 
5214  // Calculate padding at the beginning based on current offset.
5215  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5216 
5217  // Calculate required margin at the end if this is not last suballocation.
5218  VmaSuballocationList::const_iterator next = suballocItem;
5219  ++next;
5220  const VkDeviceSize requiredEndMargin =
5221  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5222 
5223  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5224  // Another early return check.
5225  if(suballocItem->offset + totalSize > m_Size)
5226  {
5227  return false;
5228  }
5229 
5230  // Advance lastSuballocItem until desired size is reached.
5231  // Update itemsToMakeLostCount.
5232  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5233  if(totalSize > suballocItem->size)
5234  {
5235  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5236  while(remainingSize > 0)
5237  {
5238  ++lastSuballocItem;
5239  if(lastSuballocItem == m_Suballocations.cend())
5240  {
5241  return false;
5242  }
5243  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5244  {
5245  *pSumFreeSize += lastSuballocItem->size;
5246  }
5247  else
5248  {
5249  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5250  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5251  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5252  {
5253  ++*itemsToMakeLostCount;
5254  *pSumItemSize += lastSuballocItem->size;
5255  }
5256  else
5257  {
5258  return false;
5259  }
5260  }
5261  remainingSize = (lastSuballocItem->size < remainingSize) ?
5262  remainingSize - lastSuballocItem->size : 0;
5263  }
5264  }
5265 
5266  // Check next suballocations for BufferImageGranularity conflicts.
5267  // If conflict exists, we must mark more allocations lost or fail.
5268  if(bufferImageGranularity > 1)
5269  {
5270  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5271  ++nextSuballocItem;
5272  while(nextSuballocItem != m_Suballocations.cend())
5273  {
5274  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5275  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5276  {
5277  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5278  {
5279  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5280  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5281  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5282  {
5283  ++*itemsToMakeLostCount;
5284  }
5285  else
5286  {
5287  return false;
5288  }
5289  }
5290  }
5291  else
5292  {
5293  // Already on next page.
5294  break;
5295  }
5296  ++nextSuballocItem;
5297  }
5298  }
5299  }
5300  else
5301  {
5302  const VmaSuballocation& suballoc = *suballocItem;
5303  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5304 
5305  *pSumFreeSize = suballoc.size;
5306 
5307  // Size of this suballocation is too small for this request: Early return.
5308  if(suballoc.size < allocSize)
5309  {
5310  return false;
5311  }
5312 
5313  // Start from offset equal to beginning of this suballocation.
5314  *pOffset = suballoc.offset;
5315 
5316  // Apply VMA_DEBUG_MARGIN at the beginning.
5317  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5318  {
5319  *pOffset += VMA_DEBUG_MARGIN;
5320  }
5321 
5322  // Apply alignment.
5323  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5324  *pOffset = VmaAlignUp(*pOffset, alignment);
5325 
5326  // Check previous suballocations for BufferImageGranularity conflicts.
5327  // Make bigger alignment if necessary.
5328  if(bufferImageGranularity > 1)
5329  {
5330  bool bufferImageGranularityConflict = false;
5331  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5332  while(prevSuballocItem != m_Suballocations.cbegin())
5333  {
5334  --prevSuballocItem;
5335  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5336  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5337  {
5338  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5339  {
5340  bufferImageGranularityConflict = true;
5341  break;
5342  }
5343  }
5344  else
5345  // Already on previous page.
5346  break;
5347  }
5348  if(bufferImageGranularityConflict)
5349  {
5350  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5351  }
5352  }
5353 
5354  // Calculate padding at the beginning based on current offset.
5355  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5356 
5357  // Calculate required margin at the end if this is not last suballocation.
5358  VmaSuballocationList::const_iterator next = suballocItem;
5359  ++next;
5360  const VkDeviceSize requiredEndMargin =
5361  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5362 
5363  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5364  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5365  {
5366  return false;
5367  }
5368 
5369  // Check next suballocations for BufferImageGranularity conflicts.
5370  // If conflict exists, allocation cannot be made here.
5371  if(bufferImageGranularity > 1)
5372  {
5373  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5374  ++nextSuballocItem;
5375  while(nextSuballocItem != m_Suballocations.cend())
5376  {
5377  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5378  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5379  {
5380  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5381  {
5382  return false;
5383  }
5384  }
5385  else
5386  {
5387  // Already on next page.
5388  break;
5389  }
5390  ++nextSuballocItem;
5391  }
5392  }
5393  }
5394 
5395  // All tests passed: Success. pOffset is already filled.
5396  return true;
5397 }
5398 
5399 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5400 {
5401  VMA_ASSERT(item != m_Suballocations.end());
5402  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5403 
5404  VmaSuballocationList::iterator nextItem = item;
5405  ++nextItem;
5406  VMA_ASSERT(nextItem != m_Suballocations.end());
5407  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5408 
5409  item->size += nextItem->size;
5410  --m_FreeCount;
5411  m_Suballocations.erase(nextItem);
5412 }
5413 
5414 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5415 {
5416  // Change this suballocation to be marked as free.
5417  VmaSuballocation& suballoc = *suballocItem;
5418  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5419  suballoc.hAllocation = VK_NULL_HANDLE;
5420 
5421  // Update totals.
5422  ++m_FreeCount;
5423  m_SumFreeSize += suballoc.size;
5424 
5425  // Merge with previous and/or next suballocation if it's also free.
5426  bool mergeWithNext = false;
5427  bool mergeWithPrev = false;
5428 
5429  VmaSuballocationList::iterator nextItem = suballocItem;
5430  ++nextItem;
5431  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5432  {
5433  mergeWithNext = true;
5434  }
5435 
5436  VmaSuballocationList::iterator prevItem = suballocItem;
5437  if(suballocItem != m_Suballocations.begin())
5438  {
5439  --prevItem;
5440  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5441  {
5442  mergeWithPrev = true;
5443  }
5444  }
5445 
5446  if(mergeWithNext)
5447  {
5448  UnregisterFreeSuballocation(nextItem);
5449  MergeFreeWithNext(suballocItem);
5450  }
5451 
5452  if(mergeWithPrev)
5453  {
5454  UnregisterFreeSuballocation(prevItem);
5455  MergeFreeWithNext(prevItem);
5456  RegisterFreeSuballocation(prevItem);
5457  return prevItem;
5458  }
5459  else
5460  {
5461  RegisterFreeSuballocation(suballocItem);
5462  return suballocItem;
5463  }
5464 }
5465 
5466 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5467 {
5468  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5469  VMA_ASSERT(item->size > 0);
5470 
5471  // You may want to enable this validation at the beginning or at the end of
5472  // this function, depending on what you want to check.
5473  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5474 
5475  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5476  {
5477  if(m_FreeSuballocationsBySize.empty())
5478  {
5479  m_FreeSuballocationsBySize.push_back(item);
5480  }
5481  else
5482  {
5483  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5484  }
5485  }
5486 
5487  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5488 }
5489 
5490 
5491 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5492 {
5493  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5494  VMA_ASSERT(item->size > 0);
5495 
5496  // You may want to enable this validation at the beginning or at the end of
5497  // this function, depending on what you want to check.
5498  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5499 
5500  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5501  {
5502  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5503  m_FreeSuballocationsBySize.data(),
5504  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5505  item,
5506  VmaSuballocationItemSizeLess());
5507  for(size_t index = it - m_FreeSuballocationsBySize.data();
5508  index < m_FreeSuballocationsBySize.size();
5509  ++index)
5510  {
5511  if(m_FreeSuballocationsBySize[index] == item)
5512  {
5513  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5514  return;
5515  }
5516  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5517  }
5518  VMA_ASSERT(0 && "Not found.");
5519  }
5520 
5521  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5522 }
5523 
5524 ////////////////////////////////////////////////////////////////////////////////
5525 // class VmaDeviceMemoryMapping
5526 
5527 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5528  m_MapCount(0),
5529  m_pMappedData(VMA_NULL)
5530 {
5531 }
5532 
5533 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5534 {
5535  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5536 }
5537 
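// Reference-counted mapping: the first Map() call maps the whole block with
// vkMapMemory; subsequent calls only increment m_MapCount and return the
// cached pointer. Unmap() unmaps only when the counter drops back to zero.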
5538 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
5539 {
5540  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5541  if(m_MapCount != 0)
5542  {
5543  ++m_MapCount;
5544  VMA_ASSERT(m_pMappedData != VMA_NULL);
5545  if(ppData != VMA_NULL)
5546  {
5547  *ppData = m_pMappedData;
5548  }
5549  return VK_SUCCESS;
5550  }
5551  else
5552  {
5553  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5554  hAllocator->m_hDevice,
5555  hMemory,
5556  0, // offset
5557  VK_WHOLE_SIZE,
5558  0, // flags
5559  &m_pMappedData);
5560  if(result == VK_SUCCESS)
5561  {
5562  if(ppData != VMA_NULL)
5563  {
5564  *ppData = m_pMappedData;
5565  }
5566  m_MapCount = 1;
5567  }
5568  return result;
5569  }
5570 }
5571 
5572 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
5573 {
5574  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5575  if(m_MapCount != 0)
5576  {
5577  if(--m_MapCount == 0)
5578  {
5579  m_pMappedData = VMA_NULL;
5580  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5581  }
5582  }
5583  else
5584  {
5585  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5586  }
5587 }
5588 
5589 ////////////////////////////////////////////////////////////////////////////////
5590 // class VmaDeviceMemoryBlock
5591 
5592 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5593  m_MemoryTypeIndex(UINT32_MAX),
5594  m_hMemory(VK_NULL_HANDLE),
5595  m_Metadata(hAllocator)
5596 {
5597 }
5598 
5599 void VmaDeviceMemoryBlock::Init(
5600  uint32_t newMemoryTypeIndex,
5601  VkDeviceMemory newMemory,
5602  VkDeviceSize newSize)
5603 {
5604  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5605 
5606  m_MemoryTypeIndex = newMemoryTypeIndex;
5607  m_hMemory = newMemory;
5608 
5609  m_Metadata.Init(newSize);
5610 }
5611 
5612 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5613 {
5614  // This is the most important assert in the entire library.
5615  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5616  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5617 
5618  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5619  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5620  m_hMemory = VK_NULL_HANDLE;
5621 }
5622 
5623 bool VmaDeviceMemoryBlock::Validate() const
5624 {
5625  if((m_hMemory == VK_NULL_HANDLE) ||
5626  (m_Metadata.GetSize() == 0))
5627  {
5628  return false;
5629  }
5630 
5631  return m_Metadata.Validate();
5632 }
5633 
5634 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
5635 {
5636  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
5637 }
5638 
5639 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
5640 {
5641  m_Mapping.Unmap(hAllocator, m_hMemory);
5642 }
5643 
5644 static void InitStatInfo(VmaStatInfo& outInfo)
5645 {
5646  memset(&outInfo, 0, sizeof(outInfo));
5647  outInfo.allocationSizeMin = UINT64_MAX;
5648  outInfo.unusedRangeSizeMin = UINT64_MAX;
5649 }
5650 
5651 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5652 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5653 {
5654  inoutInfo.blockCount += srcInfo.blockCount;
5655  inoutInfo.allocationCount += srcInfo.allocationCount;
5656  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5657  inoutInfo.usedBytes += srcInfo.usedBytes;
5658  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5659  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5660  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5661  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5662  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5663 }
5664 
5665 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5666 {
5667  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5668  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5669  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5670  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5671 }
5672 
5673 VmaPool_T::VmaPool_T(
5674  VmaAllocator hAllocator,
5675  const VmaPoolCreateInfo& createInfo) :
5676  m_BlockVector(
5677  hAllocator,
5678  createInfo.memoryTypeIndex,
5679  createInfo.blockSize,
5680  createInfo.minBlockCount,
5681  createInfo.maxBlockCount,
5682  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5683  createInfo.frameInUseCount,
5684  true) // isCustomPool
5685 {
5686 }
5687 
5688 VmaPool_T::~VmaPool_T()
5689 {
5690 }
5691 
5692 #if VMA_STATS_STRING_ENABLED
5693 
5694 #endif // #if VMA_STATS_STRING_ENABLED
5695 
5696 VmaBlockVector::VmaBlockVector(
5697  VmaAllocator hAllocator,
5698  uint32_t memoryTypeIndex,
5699  VkDeviceSize preferredBlockSize,
5700  size_t minBlockCount,
5701  size_t maxBlockCount,
5702  VkDeviceSize bufferImageGranularity,
5703  uint32_t frameInUseCount,
5704  bool isCustomPool) :
5705  m_hAllocator(hAllocator),
5706  m_MemoryTypeIndex(memoryTypeIndex),
5707  m_PreferredBlockSize(preferredBlockSize),
5708  m_MinBlockCount(minBlockCount),
5709  m_MaxBlockCount(maxBlockCount),
5710  m_BufferImageGranularity(bufferImageGranularity),
5711  m_FrameInUseCount(frameInUseCount),
5712  m_IsCustomPool(isCustomPool),
5713  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5714  m_HasEmptyBlock(false),
5715  m_pDefragmentator(VMA_NULL)
5716 {
5717 }
5718 
5719 VmaBlockVector::~VmaBlockVector()
5720 {
5721  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5722 
5723  for(size_t i = m_Blocks.size(); i--; )
5724  {
5725  m_Blocks[i]->Destroy(m_hAllocator);
5726  vma_delete(m_hAllocator, m_Blocks[i]);
5727  }
5728 }
5729 
5730 VkResult VmaBlockVector::CreateMinBlocks()
5731 {
5732  for(size_t i = 0; i < m_MinBlockCount; ++i)
5733  {
5734  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5735  if(res != VK_SUCCESS)
5736  {
5737  return res;
5738  }
5739  }
5740  return VK_SUCCESS;
5741 }
5742 
5743 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5744 {
5745  pStats->size = 0;
5746  pStats->unusedSize = 0;
5747  pStats->allocationCount = 0;
5748  pStats->unusedRangeCount = 0;
5749  pStats->unusedRangeSizeMax = 0;
5750 
5751  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5752 
5753  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5754  {
5755  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5756  VMA_ASSERT(pBlock);
5757  VMA_HEAVY_ASSERT(pBlock->Validate());
5758  pBlock->m_Metadata.AddPoolStats(*pStats);
5759  }
5760 }
5761 
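// Upper bound on retries in VmaBlockVector::Allocate when canMakeOtherLost is
// used, so that concurrent threads touching allocations cannot livelock the loop.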
5762 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5763 
5764 VkResult VmaBlockVector::Allocate(
5765  VmaPool hCurrentPool,
5766  uint32_t currentFrameIndex,
5767  const VkMemoryRequirements& vkMemReq,
5768  const VmaAllocationCreateInfo& createInfo,
5769  VmaSuballocationType suballocType,
5770  VmaAllocation* pAllocation)
5771 {
5772  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
5773  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
5774 
5775  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5776 
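 // Allocation proceeds in up to three stages: (1) fit into an existing block,
 // (2) create a new block (halving the size down to a quarter on failure),
 // (3) if allowed, make other allocations lost to reclaim space.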
5777  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5778  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5779  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5780  {
5781  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5782  VMA_ASSERT(pCurrBlock);
5783  VmaAllocationRequest currRequest = {};
5784  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5785  currentFrameIndex,
5786  m_FrameInUseCount,
5787  m_BufferImageGranularity,
5788  vkMemReq.size,
5789  vkMemReq.alignment,
5790  suballocType,
5791  false, // canMakeOtherLost
5792  &currRequest))
5793  {
5794  // Allocate from pCurrBlock.
5795  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5796 
5797  if(mapped)
5798  {
5799  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
5800  if(res != VK_SUCCESS)
5801  {
5802  return res;
5803  }
5804  }
5805 
5806  // We no longer have an empty block.
5807  if(pCurrBlock->m_Metadata.IsEmpty())
5808  {
5809  m_HasEmptyBlock = false;
5810  }
5811 
5812  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5813  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5814  (*pAllocation)->InitBlockAllocation(
5815  hCurrentPool,
5816  pCurrBlock,
5817  currRequest.offset,
5818  vkMemReq.alignment,
5819  vkMemReq.size,
5820  suballocType,
5821  mapped,
5822  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5823  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5824  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
5825  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5826  return VK_SUCCESS;
5827  }
5828  }
5829 
5830  const bool canCreateNewBlock =
5831  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5832  (m_Blocks.size() < m_MaxBlockCount);
5833 
5834  // 2. Try to create new block.
5835  if(canCreateNewBlock)
5836  {
5837  // 2.1. Start with full preferredBlockSize.
5838  VkDeviceSize blockSize = m_PreferredBlockSize;
5839  size_t newBlockIndex = 0;
5840  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5841  // Allocating blocks of other sizes is allowed only in default pools.
5842  // In custom pools block size is fixed.
5843  if(res < 0 && m_IsCustomPool == false)
5844  {
5845  // 2.2. Try half the size.
5846  blockSize /= 2;
5847  if(blockSize >= vkMemReq.size)
5848  {
5849  res = CreateBlock(blockSize, &newBlockIndex);
5850  if(res < 0)
5851  {
5852  // 2.3. Try quarter the size.
5853  blockSize /= 2;
5854  if(blockSize >= vkMemReq.size)
5855  {
5856  res = CreateBlock(blockSize, &newBlockIndex);
5857  }
5858  }
5859  }
5860  }
5861  if(res == VK_SUCCESS)
5862  {
5863  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5864  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5865 
5866  if(mapped)
5867  {
5868  res = pBlock->Map(m_hAllocator, nullptr);
5869  if(res != VK_SUCCESS)
5870  {
5871  return res;
5872  }
5873  }
5874 
5875  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5876  VmaAllocationRequest allocRequest;
5877  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5878  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5879  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5880  (*pAllocation)->InitBlockAllocation(
5881  hCurrentPool,
5882  pBlock,
5883  allocRequest.offset,
5884  vkMemReq.alignment,
5885  vkMemReq.size,
5886  suballocType,
5887  mapped,
5888  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5889  VMA_HEAVY_ASSERT(pBlock->Validate());
5890  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
5891  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5892  return VK_SUCCESS;
5893  }
5894  }
5895 
5896  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5897 
5898  // 3. Try to allocate from existing blocks with making other allocations lost.
5899  if(canMakeOtherLost)
5900  {
5901  uint32_t tryIndex = 0;
5902  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5903  {
5904  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5905  VmaAllocationRequest bestRequest = {};
5906  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5907 
5908  // 1. Search existing allocations.
5909  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5910  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5911  {
5912  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5913  VMA_ASSERT(pCurrBlock);
5914  VmaAllocationRequest currRequest = {};
5915  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5916  currentFrameIndex,
5917  m_FrameInUseCount,
5918  m_BufferImageGranularity,
5919  vkMemReq.size,
5920  vkMemReq.alignment,
5921  suballocType,
5922  canMakeOtherLost,
5923  &currRequest))
5924  {
5925  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5926  if(pBestRequestBlock == VMA_NULL ||
5927  currRequestCost < bestRequestCost)
5928  {
5929  pBestRequestBlock = pCurrBlock;
5930  bestRequest = currRequest;
5931  bestRequestCost = currRequestCost;
5932 
5933  if(bestRequestCost == 0)
5934  {
5935  break;
5936  }
5937  }
5938  }
5939  }
5940 
5941  if(pBestRequestBlock != VMA_NULL)
5942  {
5943  if(mapped)
5944  {
5945  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
5946  if(res != VK_SUCCESS)
5947  {
5948  return res;
5949  }
5950  }
5951 
5952  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5953  currentFrameIndex,
5954  m_FrameInUseCount,
5955  &bestRequest))
5956  {
5957  // We no longer have an empty block.
5958  if(pBestRequestBlock->m_Metadata.IsEmpty())
5959  {
5960  m_HasEmptyBlock = false;
5961  }
5962  // Allocate from pBestRequestBlock.
5963  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5964  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5965  (*pAllocation)->InitBlockAllocation(
5966  hCurrentPool,
5967  pBestRequestBlock,
5968  bestRequest.offset,
5969  vkMemReq.alignment,
5970  vkMemReq.size,
5971  suballocType,
5972  mapped,
5973  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5974  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5975  VMA_DEBUG_LOG(" Returned from existing block");
5976  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5977  return VK_SUCCESS;
5978  }
5979  // else: Some allocations must have been touched while we are here. Next try.
5980  }
5981  else
5982  {
5983  // Could not find place in any of the blocks - break outer loop.
5984  break;
5985  }
5986  }
5987  /* Maximum number of tries exceeded - a very unlikely event when many other
5988  threads are simultaneously touching allocations, making it impossible to mark
5989  them as lost at the same time as we try to allocate. */
5990  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5991  {
5992  return VK_ERROR_TOO_MANY_OBJECTS;
5993  }
5994  }
5995 
5996  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5997 }
5998 
5999 void VmaBlockVector::Free(
6000  VmaAllocation hAllocation)
6001 {
6002  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6003 
6004  // Scope for lock.
6005  {
6006  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6007 
6008  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6009 
6010  if(hAllocation->IsPersistentMap())
6011  {
6012  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
6013  }
6014 
6015  pBlock->m_Metadata.Free(hAllocation);
6016  VMA_HEAVY_ASSERT(pBlock->Validate());
6017 
6018  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6019 
6020  // pBlock became empty after this deallocation.
6021  if(pBlock->m_Metadata.IsEmpty())
6022  {
6023  // We already have an empty block; we don't want two, so delete this one.
6024  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6025  {
6026  pBlockToDelete = pBlock;
6027  Remove(pBlock);
6028  }
6029  // We now have our first empty block.
6030  else
6031  {
6032  m_HasEmptyBlock = true;
6033  }
6034  }
6035  // pBlock didn't become empty, but we have another empty block - find and free that one.
6036  // (This is optional, heuristics.)
6037  else if(m_HasEmptyBlock)
6038  {
6039  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6040  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6041  {
6042  pBlockToDelete = pLastBlock;
6043  m_Blocks.pop_back();
6044  m_HasEmptyBlock = false;
6045  }
6046  }
6047 
6048  IncrementallySortBlocks();
6049  }
6050 
6051  // Destruction of an empty block. Deferred until this point, outside of the
6052  // mutex lock, for performance reasons.
6053  if(pBlockToDelete != VMA_NULL)
6054  {
6055  VMA_DEBUG_LOG(" Deleted empty block");
6056  pBlockToDelete->Destroy(m_hAllocator);
6057  vma_delete(m_hAllocator, pBlockToDelete);
6058  }
6059 }
6060 
6061 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6062 {
6063  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6064  {
6065  if(m_Blocks[blockIndex] == pBlock)
6066  {
6067  VmaVectorRemove(m_Blocks, blockIndex);
6068  return;
6069  }
6070  }
6071  VMA_ASSERT(0);
6072 }
6073 
6074 void VmaBlockVector::IncrementallySortBlocks()
6075 {
6076  // Bubble sort only until first swap.
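 // Called after every free; one swap per call is enough to keep m_Blocks
 // approximately sorted by ascending free space without a full sort.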
6077  for(size_t i = 1; i < m_Blocks.size(); ++i)
6078  {
6079  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6080  {
6081  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6082  return;
6083  }
6084  }
6085 }
6086 
6087 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6088 {
6089  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6090  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6091  allocInfo.allocationSize = blockSize;
6092  VkDeviceMemory mem = VK_NULL_HANDLE;
6093  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6094  if(res < 0)
6095  {
6096  return res;
6097  }
6098 
6099  // New VkDeviceMemory successfully created.
6100 
6101  // Create a new block object for it.
6102  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6103  pBlock->Init(
6104  m_MemoryTypeIndex,
6105  mem,
6106  allocInfo.allocationSize);
6107 
6108  m_Blocks.push_back(pBlock);
6109  if(pNewBlockIndex != VMA_NULL)
6110  {
6111  *pNewBlockIndex = m_Blocks.size() - 1;
6112  }
6113 
6114  return VK_SUCCESS;
6115 }
6116 
6117 #if VMA_STATS_STRING_ENABLED
6118 
6119 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6120 {
6121  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6122 
6123  json.BeginObject();
6124 
6125  if(m_IsCustomPool)
6126  {
6127  json.WriteString("MemoryTypeIndex");
6128  json.WriteNumber(m_MemoryTypeIndex);
6129 
6130  json.WriteString("BlockSize");
6131  json.WriteNumber(m_PreferredBlockSize);
6132 
6133  json.WriteString("BlockCount");
6134  json.BeginObject(true);
6135  if(m_MinBlockCount > 0)
6136  {
6137  json.WriteString("Min");
6138  json.WriteNumber(m_MinBlockCount);
6139  }
6140  if(m_MaxBlockCount < SIZE_MAX)
6141  {
6142  json.WriteString("Max");
6143  json.WriteNumber(m_MaxBlockCount);
6144  }
6145  json.WriteString("Cur");
6146  json.WriteNumber(m_Blocks.size());
6147  json.EndObject();
6148 
6149  if(m_FrameInUseCount > 0)
6150  {
6151  json.WriteString("FrameInUseCount");
6152  json.WriteNumber(m_FrameInUseCount);
6153  }
6154  }
6155  else
6156  {
6157  json.WriteString("PreferredBlockSize");
6158  json.WriteNumber(m_PreferredBlockSize);
6159  }
6160 
6161  json.WriteString("Blocks");
6162  json.BeginArray();
6163  for(size_t i = 0; i < m_Blocks.size(); ++i)
6164  {
6165  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6166  }
6167  json.EndArray();
6168 
6169  json.EndObject();
6170 }
6171 
6172 #endif // #if VMA_STATS_STRING_ENABLED
6173 
6174 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6175  VmaAllocator hAllocator,
6176  uint32_t currentFrameIndex)
6177 {
6178  if(m_pDefragmentator == VMA_NULL)
6179  {
6180  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6181  hAllocator,
6182  this,
6183  currentFrameIndex);
6184  }
6185 
6186  return m_pDefragmentator;
6187 }
6188 
6189 VkResult VmaBlockVector::Defragment(
6190  VmaDefragmentationStats* pDefragmentationStats,
6191  VkDeviceSize& maxBytesToMove,
6192  uint32_t& maxAllocationsToMove)
6193 {
6194  if(m_pDefragmentator == VMA_NULL)
6195  {
6196  return VK_SUCCESS;
6197  }
6198 
6199  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6200 
6201  // Defragment.
6202  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6203 
6204  // Accumulate statistics.
6205  if(pDefragmentationStats != VMA_NULL)
6206  {
6207  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6208  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6209  pDefragmentationStats->bytesMoved += bytesMoved;
6210  pDefragmentationStats->allocationsMoved += allocationsMoved;
6211  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6212  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6213  maxBytesToMove -= bytesMoved;
6214  maxAllocationsToMove -= allocationsMoved;
6215  }
6216 
6217  // Free empty blocks.
6218  m_HasEmptyBlock = false;
6219  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6220  {
6221  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6222  if(pBlock->m_Metadata.IsEmpty())
6223  {
6224  if(m_Blocks.size() > m_MinBlockCount)
6225  {
6226  if(pDefragmentationStats != VMA_NULL)
6227  {
6228  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6229  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6230  }
6231 
6232  VmaVectorRemove(m_Blocks, blockIndex);
6233  pBlock->Destroy(m_hAllocator);
6234  vma_delete(m_hAllocator, pBlock);
6235  }
6236  else
6237  {
6238  m_HasEmptyBlock = true;
6239  }
6240  }
6241  }
6242 
6243  return result;
6244 }
6245 
6246 void VmaBlockVector::DestroyDefragmentator()
6247 {
6248  if(m_pDefragmentator != VMA_NULL)
6249  {
6250  vma_delete(m_hAllocator, m_pDefragmentator);
6251  m_pDefragmentator = VMA_NULL;
6252  }
6253 }
6254 
6255 void VmaBlockVector::MakePoolAllocationsLost(
6256  uint32_t currentFrameIndex,
6257  size_t* pLostAllocationCount)
6258 {
6259  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6260 
6261  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6262  {
6263  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6264  VMA_ASSERT(pBlock);
6265  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6266  }
6267 }
6268 
6269 void VmaBlockVector::AddStats(VmaStats* pStats)
6270 {
6271  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6272  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6273 
6274  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6275 
6276  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6277  {
6278  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6279  VMA_ASSERT(pBlock);
6280  VMA_HEAVY_ASSERT(pBlock->Validate());
6281  VmaStatInfo allocationStatInfo;
6282  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6283  VmaAddStatInfo(pStats->total, allocationStatInfo);
6284  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6285  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6286  }
6287 }
6288 
6289 ////////////////////////////////////////////////////////////////////////////////
6290 // VmaDefragmentator members definition
6291 
6292 VmaDefragmentator::VmaDefragmentator(
6293  VmaAllocator hAllocator,
6294  VmaBlockVector* pBlockVector,
6295  uint32_t currentFrameIndex) :
6296  m_hAllocator(hAllocator),
6297  m_pBlockVector(pBlockVector),
6298  m_CurrentFrameIndex(currentFrameIndex),
6299  m_BytesMoved(0),
6300  m_AllocationsMoved(0),
6301  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6302  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6303 {
6304 }
6305 
6306 VmaDefragmentator::~VmaDefragmentator()
6307 {
6308  for(size_t i = m_Blocks.size(); i--; )
6309  {
6310  vma_delete(m_hAllocator, m_Blocks[i]);
6311  }
6312 }
6313 
6314 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6315 {
6316  AllocationInfo allocInfo;
6317  allocInfo.m_hAllocation = hAlloc;
6318  allocInfo.m_pChanged = pChanged;
6319  m_Allocations.push_back(allocInfo);
6320 }
6321 
6322 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6323 {
6324  // It has already been mapped for defragmentation.
6325  if(m_pMappedDataForDefragmentation)
6326  {
6327  *ppMappedData = m_pMappedDataForDefragmentation;
6328  return VK_SUCCESS;
6329  }
6330 
6331  // The block is already persistently mapped.
6332  if(m_pBlock->m_Mapping.GetMappedData())
6333  {
6334  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6335  return VK_SUCCESS;
6336  }
6337 
6338  // Map on first usage.
6339  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
6340  *ppMappedData = m_pMappedDataForDefragmentation;
6341  return res;
6342 }
6343 
6344 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6345 {
6346  if(m_pMappedDataForDefragmentation != VMA_NULL)
6347  {
6348  m_pBlock->Unmap(hAllocator);
6349  }
6350 }
6351 
6352 VkResult VmaDefragmentator::DefragmentRound(
6353  VkDeviceSize maxBytesToMove,
6354  uint32_t maxAllocationsToMove)
6355 {
6356  if(m_Blocks.empty())
6357  {
6358  return VK_SUCCESS;
6359  }
6360 
6361  size_t srcBlockIndex = m_Blocks.size() - 1;
6362  size_t srcAllocIndex = SIZE_MAX;
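 // SIZE_MAX is a sentinel meaning "no allocation chosen yet"; since the index
 // is unsigned, the while condition below then selects the last allocation of
 // the current block.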
6363  for(;;)
6364  {
6365  // 1. Find next allocation to move.
6366  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6367  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6368  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6369  {
6370  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6371  {
6372  // Finished: no more allocations to process.
6373  if(srcBlockIndex == 0)
6374  {
6375  return VK_SUCCESS;
6376  }
6377  else
6378  {
6379  --srcBlockIndex;
6380  srcAllocIndex = SIZE_MAX;
6381  }
6382  }
6383  else
6384  {
6385  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6386  }
6387  }
6388 
6389  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6390  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6391 
6392  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6393  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6394  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6395  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6396 
6397  // 2. Try to find new place for this allocation in preceding or current block.
6398  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6399  {
6400  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6401  VmaAllocationRequest dstAllocRequest;
6402  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6403  m_CurrentFrameIndex,
6404  m_pBlockVector->GetFrameInUseCount(),
6405  m_pBlockVector->GetBufferImageGranularity(),
6406  size,
6407  alignment,
6408  suballocType,
6409  false, // canMakeOtherLost
6410  &dstAllocRequest) &&
6411  MoveMakesSense(
6412  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6413  {
6414  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6415 
6416  // Reached limit on number of allocations or bytes to move.
6417  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6418  (m_BytesMoved + size > maxBytesToMove))
6419  {
6420  return VK_INCOMPLETE;
6421  }
6422 
6423  void* pDstMappedData = VMA_NULL;
6424  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6425  if(res != VK_SUCCESS)
6426  {
6427  return res;
6428  }
6429 
6430  void* pSrcMappedData = VMA_NULL;
6431  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6432  if(res != VK_SUCCESS)
6433  {
6434  return res;
6435  }
6436 
6437  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6438  memcpy(
6439  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6440  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6441  static_cast<size_t>(size));
6442 
6443  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6444  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6445 
6446  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6447 
6448  if(allocInfo.m_pChanged != VMA_NULL)
6449  {
6450  *allocInfo.m_pChanged = VK_TRUE;
6451  }
6452 
6453  ++m_AllocationsMoved;
6454  m_BytesMoved += size;
6455 
6456  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6457 
6458  break;
6459  }
6460  }
6461 
6462  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
6463 
6464  if(srcAllocIndex > 0)
6465  {
6466  --srcAllocIndex;
6467  }
6468  else
6469  {
6470  if(srcBlockIndex > 0)
6471  {
6472  --srcBlockIndex;
6473  srcAllocIndex = SIZE_MAX;
6474  }
6475  else
6476  {
6477  return VK_SUCCESS;
6478  }
6479  }
6480  }
6481 }
6482 
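// Defragmentation outline: group registered allocations by their block, sort
// blocks from most "destination" to most "source", then run up to two rounds
// that copy allocations toward the front blocks.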
6483 VkResult VmaDefragmentator::Defragment(
6484  VkDeviceSize maxBytesToMove,
6485  uint32_t maxAllocationsToMove)
6486 {
6487  if(m_Allocations.empty())
6488  {
6489  return VK_SUCCESS;
6490  }
6491 
6492  // Create block info for each block.
6493  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6494  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6495  {
6496  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6497  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6498  m_Blocks.push_back(pBlockInfo);
6499  }
6500 
6501  // Sort them by m_pBlock pointer value.
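 // Sorting by pointer enables the binary search (VmaBinaryFindFirstNotLess)
 // used below to find the BlockInfo for each allocation.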
6502  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6503 
6504  // Move each allocation info from m_Allocations to the m_Allocations of its owning BlockInfo in m_Blocks.
6505  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6506  {
6507  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6508  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
6509  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6510  {
6511  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6512  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6513  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6514  {
6515  (*it)->m_Allocations.push_back(allocInfo);
6516  }
6517  else
6518  {
6519  VMA_ASSERT(0);
6520  }
6521  }
6522  }
6523  m_Allocations.clear();
6524 
6525  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6526  {
6527  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6528  pBlockInfo->CalcHasNonMovableAllocations();
6529  pBlockInfo->SortAllocationsBySizeDescecnding();
6530  }
6531 
6532  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6533  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6534 
6535  // Execute defragmentation rounds (the main part).
6536  VkResult result = VK_SUCCESS;
6537  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6538  {
6539  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6540  }
6541 
6542  // Unmap blocks that were mapped for defragmentation.
6543  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6544  {
6545  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6546  }
6547 
6548  return result;
6549 }
6550 
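// A move makes sense only if it transfers data to an earlier block, or to a
// lower offset within the same block, that is, strictly toward the "destination".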
6551 bool VmaDefragmentator::MoveMakesSense(
6552  size_t dstBlockIndex, VkDeviceSize dstOffset,
6553  size_t srcBlockIndex, VkDeviceSize srcOffset)
6554 {
6555  if(dstBlockIndex < srcBlockIndex)
6556  {
6557  return true;
6558  }
6559  if(dstBlockIndex > srcBlockIndex)
6560  {
6561  return false;
6562  }
6563  if(dstOffset < srcOffset)
6564  {
6565  return true;
6566  }
6567  return false;
6568 }
6569 
6570 ////////////////////////////////////////////////////////////////////////////////
6571 // VmaAllocator_T
6572 
6573 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6574  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6575  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6576  m_PhysicalDevice(pCreateInfo->physicalDevice),
6577  m_hDevice(pCreateInfo->device),
6578  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6579  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6580  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6581  m_PreferredLargeHeapBlockSize(0),
6582  m_PreferredSmallHeapBlockSize(0),
6583  m_CurrentFrameIndex(0),
6584  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6585 {
6586  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6587 
6588  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
6589  memset(&m_MemProps, 0, sizeof(m_MemProps));
6590  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6591 
6592  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6593  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6594 
6595  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6596  {
6597  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6598  }
6599 
6600  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6601  {
6602  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6603  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6604  }
6605 
6606  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6607 
6608  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6609  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6610 
6611  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6612  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6613  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6614  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6615 
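 // If the user specified heap size limits, remember them and also clamp the
 // reported heap sizes, so the rest of the library treats each limited heap
 // as if the device really had that much memory.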
6616  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6617  {
6618  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6619  {
6620  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6621  if(limit != VK_WHOLE_SIZE)
6622  {
6623  m_HeapSizeLimit[heapIndex] = limit;
6624  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6625  {
6626  m_MemProps.memoryHeaps[heapIndex].size = limit;
6627  }
6628  }
6629  }
6630  }
6631 
6632  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6633  {
6634  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6635 
6636  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
6637  this,
6638  memTypeIndex,
6639  preferredBlockSize,
6640  0,
6641  SIZE_MAX,
6642  GetBufferImageGranularity(),
6643  pCreateInfo->frameInUseCount,
6644  false); // isCustomPool
6645  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
6646  // because minBlockCount is 0.
6647  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6648  }
6649 }
6650 
6651 VmaAllocator_T::~VmaAllocator_T()
6652 {
6653  VMA_ASSERT(m_Pools.empty());
6654 
6655  for(size_t i = GetMemoryTypeCount(); i--; )
6656  {
6657  vma_delete(this, m_pDedicatedAllocations[i]);
6658  vma_delete(this, m_pBlockVectors[i]);
6659  }
6660 }
6661 
6662 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6663 {
6664 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6665  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6666  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6667  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6668  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6669  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6670  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6671  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6672  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6673  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6674  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6675  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6676  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6677  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6678  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6679  // Ignoring vkGetBufferMemoryRequirements2KHR.
6680  // Ignoring vkGetImageMemoryRequirements2KHR.
6681 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6682 
6683 #define VMA_COPY_IF_NOT_NULL(funcName) \
6684  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6685 
6686  if(pVulkanFunctions != VMA_NULL)
6687  {
6688  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6689  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6690  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6691  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6692  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6693  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6694  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6695  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6696  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6697  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6698  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6699  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6700  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6701  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6702  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6703  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6704  }
6705 
6706 #undef VMA_COPY_IF_NOT_NULL
6707 
6708  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6709  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6710  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6711  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6712  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6713  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6714  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6715  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6716  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6717  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6718  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6719  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6720  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6721  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6722  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6723  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6724  if(m_UseKhrDedicatedAllocation)
6725  {
6726  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6727  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6728  }
6729 }
6730 
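// Presumably the smaller preferred block size is used for small heaps so that
// a few fixed-size blocks cannot exhaust the whole heap.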
6731 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6732 {
6733  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6734  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6735  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6736  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6737 }
6738 
6739 VkResult VmaAllocator_T::AllocateMemoryOfType(
6740  const VkMemoryRequirements& vkMemReq,
6741  bool dedicatedAllocation,
6742  VkBuffer dedicatedBuffer,
6743  VkImage dedicatedImage,
6744  const VmaAllocationCreateInfo& createInfo,
6745  uint32_t memTypeIndex,
6746  VmaSuballocationType suballocType,
6747  VmaAllocation* pAllocation)
6748 {
6749  VMA_ASSERT(pAllocation != VMA_NULL);
6750  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6751 
6752  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6753 
6754  // If memory type is not HOST_VISIBLE, disable MAPPED.
6755  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6756  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6757  {
6758  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
6759  }
6760 
6761  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
6762  VMA_ASSERT(blockVector);
6763 
6764  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6765  bool preferDedicatedMemory =
6766  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6767  dedicatedAllocation ||
6768  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
6769  vkMemReq.size > preferredBlockSize / 2;
6770 
6771  if(preferDedicatedMemory &&
6772  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6773  finalCreateInfo.pool == VK_NULL_HANDLE)
6774  {
6775  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6776  }
6777 
6778  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6779  {
6780  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6781  {
6782  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6783  }
6784  else
6785  {
6786  return AllocateDedicatedMemory(
6787  vkMemReq.size,
6788  suballocType,
6789  memTypeIndex,
6790  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6791  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6792  finalCreateInfo.pUserData,
6793  dedicatedBuffer,
6794  dedicatedImage,
6795  pAllocation);
6796  }
6797  }
6798  else
6799  {
6800  VkResult res = blockVector->Allocate(
6801  VK_NULL_HANDLE, // hCurrentPool
6802  m_CurrentFrameIndex.load(),
6803  vkMemReq,
6804  finalCreateInfo,
6805  suballocType,
6806  pAllocation);
6807  if(res == VK_SUCCESS)
6808  {
6809  return res;
6810  }
6811 
6812  // Try dedicated memory as a fallback.
6813  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6814  {
6815  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6816  }
6817  else
6818  {
6819  res = AllocateDedicatedMemory(
6820  vkMemReq.size,
6821  suballocType,
6822  memTypeIndex,
6823  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6824  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6825  finalCreateInfo.pUserData,
6826  dedicatedBuffer,
6827  dedicatedImage,
6828  pAllocation);
6829  if(res == VK_SUCCESS)
6830  {
6831  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
6832  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6833  return VK_SUCCESS;
6834  }
6835  else
6836  {
6837  // Everything failed: Return error code.
6838  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6839  return res;
6840  }
6841  }
6842  }
6843 }
6844 
6845 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6846  VkDeviceSize size,
6847  VmaSuballocationType suballocType,
6848  uint32_t memTypeIndex,
6849  bool map,
6850  bool isUserDataString,
6851  void* pUserData,
6852  VkBuffer dedicatedBuffer,
6853  VkImage dedicatedImage,
6854  VmaAllocation* pAllocation)
6855 {
6856  VMA_ASSERT(pAllocation);
6857 
6858  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6859  allocInfo.memoryTypeIndex = memTypeIndex;
6860  allocInfo.allocationSize = size;
6861 
6862  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6863  if(m_UseKhrDedicatedAllocation)
6864  {
6865  if(dedicatedBuffer != VK_NULL_HANDLE)
6866  {
6867  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
6868  dedicatedAllocInfo.buffer = dedicatedBuffer;
6869  allocInfo.pNext = &dedicatedAllocInfo;
6870  }
6871  else if(dedicatedImage != VK_NULL_HANDLE)
6872  {
6873  dedicatedAllocInfo.image = dedicatedImage;
6874  allocInfo.pNext = &dedicatedAllocInfo;
6875  }
6876  }
6877 
6878  // Allocate VkDeviceMemory.
6879  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6880  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6881  if(res < 0)
6882  {
6883  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6884  return res;
6885  }
6886 
6887  void* pMappedData = nullptr;
6888  if(map)
6889  {
6890  res = (*m_VulkanFunctions.vkMapMemory)(
6891  m_hDevice,
6892  hMemory,
6893  0,
6894  VK_WHOLE_SIZE,
6895  0,
6896  &pMappedData);
6897  if(res < 0)
6898  {
6899  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6900  FreeVulkanMemory(memTypeIndex, size, hMemory);
6901  return res;
6902  }
6903  }
6904 
6905  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
6906  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
6907  (*pAllocation)->SetUserData(this, pUserData);
6908 
6909  // Register it in m_pDedicatedAllocations.
6910  {
6911  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6912  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
6913  VMA_ASSERT(pDedicatedAllocations);
6914  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
6915  }
6916 
6917  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
6918 
6919  return VK_SUCCESS;
6920 }
6921 
6922 void VmaAllocator_T::GetBufferMemoryRequirements(
6923  VkBuffer hBuffer,
6924  VkMemoryRequirements& memReq,
6925  bool& requiresDedicatedAllocation,
6926  bool& prefersDedicatedAllocation) const
6927 {
6928  if(m_UseKhrDedicatedAllocation)
6929  {
6930  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
6931  memReqInfo.buffer = hBuffer;
6932 
6933  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6934 
6935  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6936  memReq2.pNext = &memDedicatedReq;
6937 
6938  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6939 
6940  memReq = memReq2.memoryRequirements;
6941  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6942  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6943  }
6944  else
6945  {
6946  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
6947  requiresDedicatedAllocation = false;
6948  prefersDedicatedAllocation = false;
6949  }
6950 }
6951 
6952 void VmaAllocator_T::GetImageMemoryRequirements(
6953  VkImage hImage,
6954  VkMemoryRequirements& memReq,
6955  bool& requiresDedicatedAllocation,
6956  bool& prefersDedicatedAllocation) const
6957 {
6958  if(m_UseKhrDedicatedAllocation)
6959  {
6960  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
6961  memReqInfo.image = hImage;
6962 
6963  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6964 
6965  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6966  memReq2.pNext = &memDedicatedReq;
6967 
6968  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6969 
6970  memReq = memReq2.memoryRequirements;
6971  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6972  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6973  }
6974  else
6975  {
6976  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
6977  requiresDedicatedAllocation = false;
6978  prefersDedicatedAllocation = false;
6979  }
6980 }
6981 
6982 VkResult VmaAllocator_T::AllocateMemory(
6983  const VkMemoryRequirements& vkMemReq,
6984  bool requiresDedicatedAllocation,
6985  bool prefersDedicatedAllocation,
6986  VkBuffer dedicatedBuffer,
6987  VkImage dedicatedImage,
6988  const VmaAllocationCreateInfo& createInfo,
6989  VmaSuballocationType suballocType,
6990  VmaAllocation* pAllocation)
6991 {
6992  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
6993  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6994  {
6995  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6996  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6997  }
6998  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6999  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7000  {
7001  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7002  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7003  }
7004  if(requiresDedicatedAllocation)
7005  {
7006  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7007  {
7008  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7009  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7010  }
7011  if(createInfo.pool != VK_NULL_HANDLE)
7012  {
7013  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7014  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7015  }
7016  }
7017  if((createInfo.pool != VK_NULL_HANDLE) &&
7018  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7019  {
7020  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7021  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7022  }
7023 
7024  if(createInfo.pool != VK_NULL_HANDLE)
7025  {
7026  return createInfo.pool->m_BlockVector.Allocate(
7027  createInfo.pool,
7028  m_CurrentFrameIndex.load(),
7029  vkMemReq,
7030  createInfo,
7031  suballocType,
7032  pAllocation);
7033  }
7034  else
7035  {
7036  // Bit mask of Vulkan memory types acceptable for this allocation.
7037  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7038  uint32_t memTypeIndex = UINT32_MAX;
7039  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7040  if(res == VK_SUCCESS)
7041  {
7042  res = AllocateMemoryOfType(
7043  vkMemReq,
7044  requiresDedicatedAllocation || prefersDedicatedAllocation,
7045  dedicatedBuffer,
7046  dedicatedImage,
7047  createInfo,
7048  memTypeIndex,
7049  suballocType,
7050  pAllocation);
7051  // Succeeded on first try.
7052  if(res == VK_SUCCESS)
7053  {
7054  return res;
7055  }
7056  // Allocation from this memory type failed. Try other compatible memory types.
7057  else
7058  {
7059  for(;;)
7060  {
7061  // Remove old memTypeIndex from list of possibilities.
7062  memoryTypeBits &= ~(1u << memTypeIndex);
7063  // Find alternative memTypeIndex.
7064  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7065  if(res == VK_SUCCESS)
7066  {
7067  res = AllocateMemoryOfType(
7068  vkMemReq,
7069  requiresDedicatedAllocation || prefersDedicatedAllocation,
7070  dedicatedBuffer,
7071  dedicatedImage,
7072  createInfo,
7073  memTypeIndex,
7074  suballocType,
7075  pAllocation);
7076  // Allocation from this alternative memory type succeeded.
7077  if(res == VK_SUCCESS)
7078  {
7079  return res;
7080  }
7081  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7082  }
7083  // No other matching memory type index could be found.
7084  else
7085  {
7086  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7087  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7088  }
7089  }
7090  }
7091  }
7092  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7093  else
7094  return res;
7095  }
7096 }
7097 
7098 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7099 {
7100  VMA_ASSERT(allocation);
7101 
7102  if(allocation->CanBecomeLost() == false ||
7103  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7104  {
7105  switch(allocation->GetType())
7106  {
7107  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7108  {
7109  VmaBlockVector* pBlockVector = VMA_NULL;
7110  VmaPool hPool = allocation->GetPool();
7111  if(hPool != VK_NULL_HANDLE)
7112  {
7113  pBlockVector = &hPool->m_BlockVector;
7114  }
7115  else
7116  {
7117  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7118  pBlockVector = m_pBlockVectors[memTypeIndex];
7119  }
7120  pBlockVector->Free(allocation);
7121  }
7122  break;
7123  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7124  FreeDedicatedMemory(allocation);
7125  break;
7126  default:
7127  VMA_ASSERT(0);
7128  }
7129  }
7130 
7131  allocation->SetUserData(this, VMA_NULL);
7132  vma_delete(this, allocation);
7133 }
7134 
7135 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7136 {
7137  // Initialize.
7138  InitStatInfo(pStats->total);
7139  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7140  InitStatInfo(pStats->memoryType[i]);
7141  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7142  InitStatInfo(pStats->memoryHeap[i]);
7143 
7144  // Process default pools.
7145  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7146  {
7147  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7148  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7149  VMA_ASSERT(pBlockVector);
7150  pBlockVector->AddStats(pStats);
7151  }
7152 
7153  // Process custom pools.
7154  {
7155  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7156  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7157  {
7158  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7159  }
7160  }
7161 
7162  // Process dedicated allocations.
7163  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7164  {
7165  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7166  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7167  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7168  VMA_ASSERT(pDedicatedAllocVector);
7169  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7170  {
7171  VmaStatInfo allocationStatInfo;
7172  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7173  VmaAddStatInfo(pStats->total, allocationStatInfo);
7174  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7175  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7176  }
7177  }
7178 
7179  // Postprocess.
7180  VmaPostprocessCalcStatInfo(pStats->total);
7181  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7182  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7183  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7184  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7185 }
7186 
7187 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7188 
7189 VkResult VmaAllocator_T::Defragment(
7190  VmaAllocation* pAllocations,
7191  size_t allocationCount,
7192  VkBool32* pAllocationsChanged,
7193  const VmaDefragmentationInfo* pDefragmentationInfo,
7194  VmaDefragmentationStats* pDefragmentationStats)
7195 {
7196  if(pAllocationsChanged != VMA_NULL)
7197  {
7198  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
7199  }
7200  if(pDefragmentationStats != VMA_NULL)
7201  {
7202  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7203  }
7204 
7205  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7206 
7207  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7208 
7209  const size_t poolCount = m_Pools.size();
7210 
7211  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7212  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7213  {
7214  VmaAllocation hAlloc = pAllocations[allocIndex];
7215  VMA_ASSERT(hAlloc);
7216  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7217  // DedicatedAlloc cannot be defragmented.
7218  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7219  // Only HOST_VISIBLE memory types can be defragmented.
7220  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7221  // Lost allocation cannot be defragmented.
7222  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7223  {
7224  VmaBlockVector* pAllocBlockVector = nullptr;
7225 
7226  const VmaPool hAllocPool = hAlloc->GetPool();
7227  // This allocation belongs to a custom pool.
7228  if(hAllocPool != VK_NULL_HANDLE)
7229  {
7230  pAllocBlockVector = &hAllocPool->GetBlockVector();
7231  }
7232  // This allocation belongs to the general pool.
7233  else
7234  {
7235  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7236  }
7237 
7238  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7239 
7240  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7241  &pAllocationsChanged[allocIndex] : VMA_NULL;
7242  pDefragmentator->AddAllocation(hAlloc, pChanged);
7243  }
7244  }
7245 
7246  VkResult result = VK_SUCCESS;
7247 
7248  // ======== Main processing.
7249 
7250  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE; // VkDeviceSize is 64-bit; SIZE_MAX would truncate on 32-bit targets.
7251  uint32_t maxAllocationsToMove = UINT32_MAX;
7252  if(pDefragmentationInfo != VMA_NULL)
7253  {
7254  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7255  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7256  }
7257 
7258  // Process standard memory.
7259  for(uint32_t memTypeIndex = 0;
7260  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7261  ++memTypeIndex)
7262  {
7263  // Only HOST_VISIBLE memory types can be defragmented.
7264  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7265  {
7266  result = m_pBlockVectors[memTypeIndex]->Defragment(
7267  pDefragmentationStats,
7268  maxBytesToMove,
7269  maxAllocationsToMove);
7270  }
7271  }
7272 
7273  // Process custom pools.
7274  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7275  {
7276  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7277  pDefragmentationStats,
7278  maxBytesToMove,
7279  maxAllocationsToMove);
7280  }
7281 
7282  // ======== Destroy defragmentators.
7283 
7284  // Process custom pools.
7285  for(size_t poolIndex = poolCount; poolIndex--; )
7286  {
7287  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7288  }
7289 
7290  // Process standard memory.
7291  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7292  {
7293  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7294  {
7295  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7296  }
7297  }
7298 
7299  return result;
7300 }
7301 
7302 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7303 {
7304  if(hAllocation->CanBecomeLost())
7305  {
7306  /*
7307  Warning: This is a carefully designed algorithm.
7308  Do not modify unless you really know what you're doing :)
7309  */
7310  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7311  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7312  for(;;)
7313  {
7314  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7315  {
7316  pAllocationInfo->memoryType = UINT32_MAX;
7317  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7318  pAllocationInfo->offset = 0;
7319  pAllocationInfo->size = hAllocation->GetSize();
7320  pAllocationInfo->pMappedData = VMA_NULL;
7321  pAllocationInfo->pUserData = hAllocation->GetUserData();
7322  return;
7323  }
7324  else if(localLastUseFrameIndex == localCurrFrameIndex)
7325  {
7326  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7327  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7328  pAllocationInfo->offset = hAllocation->GetOffset();
7329  pAllocationInfo->size = hAllocation->GetSize();
7330  pAllocationInfo->pMappedData = VMA_NULL;
7331  pAllocationInfo->pUserData = hAllocation->GetUserData();
7332  return;
7333  }
7334  else // Last use time earlier than current time.
7335  {
7336  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7337  {
7338  localLastUseFrameIndex = localCurrFrameIndex;
7339  }
7340  }
7341  }
7342  }
7343  else
7344  {
7345  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7346  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7347  pAllocationInfo->offset = hAllocation->GetOffset();
7348  pAllocationInfo->size = hAllocation->GetSize();
7349  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7350  pAllocationInfo->pUserData = hAllocation->GetUserData();
7351  }
7352 }
7353 
7354 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7355 {
7356  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7357 
7358  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7359 
7360  if(newCreateInfo.maxBlockCount == 0)
7361  {
7362  newCreateInfo.maxBlockCount = SIZE_MAX;
7363  }
7364  if(newCreateInfo.blockSize == 0)
7365  {
7366  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7367  }
7368 
7369  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7370 
7371  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7372  if(res != VK_SUCCESS)
7373  {
7374  vma_delete(this, *pPool);
7375  *pPool = VMA_NULL;
7376  return res;
7377  }
7378 
7379  // Add to m_Pools.
7380  {
7381  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7382  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7383  }
7384 
7385  return VK_SUCCESS;
7386 }
7387 
7388 void VmaAllocator_T::DestroyPool(VmaPool pool)
7389 {
7390  // Remove from m_Pools.
7391  {
7392  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7393  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7394  VMA_ASSERT(success && "Pool not found in Allocator.");
7395  }
7396 
7397  vma_delete(this, pool);
7398 }
7399 
7400 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7401 {
7402  pool->m_BlockVector.GetPoolStats(pPoolStats);
7403 }
7404 
7405 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7406 {
7407  m_CurrentFrameIndex.store(frameIndex);
7408 }
7409 
7410 void VmaAllocator_T::MakePoolAllocationsLost(
7411  VmaPool hPool,
7412  size_t* pLostAllocationCount)
7413 {
7414  hPool->m_BlockVector.MakePoolAllocationsLost(
7415  m_CurrentFrameIndex.load(),
7416  pLostAllocationCount);
7417 }
7418 
7419 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7420 {
7421  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7422  (*pAllocation)->InitLost();
7423 }
7424 
7425 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7426 {
7427  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7428 
7429  VkResult res;
7430  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7431  {
7432  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7433  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7434  {
7435  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7436  if(res == VK_SUCCESS)
7437  {
7438  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7439  }
7440  }
7441  else
7442  {
7443  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7444  }
7445  }
7446  else
7447  {
7448  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7449  }
7450 
7451  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7452  {
7453  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7454  }
7455 
7456  return res;
7457 }
7458 
7459 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7460 {
7461  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7462  {
7463  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7464  }
7465 
7466  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7467 
7468  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7469  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7470  {
7471  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7472  m_HeapSizeLimit[heapIndex] += size;
7473  }
7474 }
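AllocateVulkanMemory and FreeVulkanMemory above implement the accounting behind VmaAllocatorCreateInfo::pHeapSizeLimit: every successful vkAllocateMemory decreases the remaining budget of its heap and every free returns it, with requests over budget rejected early with VK_ERROR_OUT_OF_DEVICE_MEMORY. A minimal sketch of the user-facing side, assuming valid physicalDevice and device handles; the helper name and the 256 MiB figure are illustrative only, and VK_WHOLE_SIZE means "no limit" for a heap:

#include "vk_mem_alloc.h"

VmaAllocator CreateAllocatorWithHeapLimit(VkPhysicalDevice physicalDevice, VkDevice device)
{
    // One entry per memory heap; VK_WHOLE_SIZE disables the limit for that heap.
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimit[i] = VK_WHOLE_SIZE;
    heapSizeLimit[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB (example value).

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit;

    VmaAllocator allocator = VK_NULL_HANDLE;
    vmaCreateAllocator(&allocatorInfo, &allocator);
    return allocator;
}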
7475 
7476 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7477 {
7478  if(hAllocation->CanBecomeLost())
7479  {
7480  return VK_ERROR_MEMORY_MAP_FAILED;
7481  }
7482 
7483  switch(hAllocation->GetType())
7484  {
7485  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7486  {
7487  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7488  char *pBytes = nullptr;
7489  VkResult res = pBlock->Map(this, (void**)&pBytes);
7490  if(res == VK_SUCCESS)
7491  {
7492  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7493  hAllocation->BlockAllocMap();
7494  }
7495  return res;
7496  }
7497  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7498  return hAllocation->DedicatedAllocMap(this, ppData);
7499  default:
7500  VMA_ASSERT(0);
7501  return VK_ERROR_MEMORY_MAP_FAILED;
7502  }
7503 }
7504 
7505 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7506 {
7507  switch(hAllocation->GetType())
7508  {
7509  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7510  {
7511  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7512  hAllocation->BlockAllocUnmap();
7513  pBlock->Unmap(this);
7514  }
7515  break;
7516  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7517  hAllocation->DedicatedAllocUnmap(this);
7518  break;
7519  default:
7520  VMA_ASSERT(0);
7521  }
7522 }
7523 
7524 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7525 {
7526  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7527 
7528  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7529  {
7530  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7531  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7532  VMA_ASSERT(pDedicatedAllocations);
7533  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7534  VMA_ASSERT(success);
7535  }
7536 
7537  VkDeviceMemory hMemory = allocation->GetMemory();
7538 
7539  if(allocation->GetMappedData() != VMA_NULL)
7540  {
7541  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7542  }
7543 
7544  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7545 
7546  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7547 }
7548 
7549 #if VMA_STATS_STRING_ENABLED
7550 
7551 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7552 {
7553  bool dedicatedAllocationsStarted = false;
7554  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7555  {
7556  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7557  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7558  VMA_ASSERT(pDedicatedAllocVector);
7559  if(pDedicatedAllocVector->empty() == false)
7560  {
7561  if(dedicatedAllocationsStarted == false)
7562  {
7563  dedicatedAllocationsStarted = true;
7564  json.WriteString("DedicatedAllocations");
7565  json.BeginObject();
7566  }
7567 
7568  json.BeginString("Type ");
7569  json.ContinueString(memTypeIndex);
7570  json.EndString();
7571 
7572  json.BeginArray();
7573 
7574  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7575  {
7576  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7577  json.BeginObject(true);
7578 
7579  json.WriteString("Type");
7580  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7581 
7582  json.WriteString("Size");
7583  json.WriteNumber(hAlloc->GetSize());
7584 
7585  const void* pUserData = hAlloc->GetUserData();
7586  if(pUserData != VMA_NULL)
7587  {
7588  json.WriteString("UserData");
7589  if(hAlloc->IsUserDataString())
7590  {
7591  json.WriteString((const char*)pUserData);
7592  }
7593  else
7594  {
7595  json.BeginString();
7596  json.ContinueString_Pointer(pUserData);
7597  json.EndString();
7598  }
7599  }
7600 
7601  json.EndObject();
7602  }
7603 
7604  json.EndArray();
7605  }
7606  }
7607  if(dedicatedAllocationsStarted)
7608  {
7609  json.EndObject();
7610  }
7611 
7612  {
7613  bool allocationsStarted = false;
7614  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7615  {
7616  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
7617  {
7618  if(allocationsStarted == false)
7619  {
7620  allocationsStarted = true;
7621  json.WriteString("DefaultPools");
7622  json.BeginObject();
7623  }
7624 
7625  json.BeginString("Type ");
7626  json.ContinueString(memTypeIndex);
7627  json.EndString();
7628 
7629  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
7630  }
7631  }
7632  if(allocationsStarted)
7633  {
7634  json.EndObject();
7635  }
7636  }
7637 
7638  {
7639  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7640  const size_t poolCount = m_Pools.size();
7641  if(poolCount > 0)
7642  {
7643  json.WriteString("Pools");
7644  json.BeginArray();
7645  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7646  {
7647  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7648  }
7649  json.EndArray();
7650  }
7651  }
7652 }
7653 
7654 #endif // #if VMA_STATS_STRING_ENABLED
7655 
7656 static VkResult AllocateMemoryForImage(
7657  VmaAllocator allocator,
7658  VkImage image,
7659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7660  VmaSuballocationType suballocType,
7661  VmaAllocation* pAllocation)
7662 {
7663  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7664 
7665  VkMemoryRequirements vkMemReq = {};
7666  bool requiresDedicatedAllocation = false;
7667  bool prefersDedicatedAllocation = false;
7668  allocator->GetImageMemoryRequirements(image, vkMemReq,
7669  requiresDedicatedAllocation, prefersDedicatedAllocation);
7670 
7671  return allocator->AllocateMemory(
7672  vkMemReq,
7673  requiresDedicatedAllocation,
7674  prefersDedicatedAllocation,
7675  VK_NULL_HANDLE, // dedicatedBuffer
7676  image, // dedicatedImage
7677  *pAllocationCreateInfo,
7678  suballocType,
7679  pAllocation);
7680 }
7681 
7682 ////////////////////////////////////////////////////////////////////////////////
7683 // Public interface
7684 
7685 VkResult vmaCreateAllocator(
7686  const VmaAllocatorCreateInfo* pCreateInfo,
7687  VmaAllocator* pAllocator)
7688 {
7689  VMA_ASSERT(pCreateInfo && pAllocator);
7690  VMA_DEBUG_LOG("vmaCreateAllocator");
7691  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7692  return VK_SUCCESS;
7693 }
7694 
7695 void vmaDestroyAllocator(
7696  VmaAllocator allocator)
7697 {
7698  if(allocator != VK_NULL_HANDLE)
7699  {
7700  VMA_DEBUG_LOG("vmaDestroyAllocator");
7701  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7702  vma_delete(&allocationCallbacks, allocator);
7703  }
7704 }
7705 
7706 void vmaGetPhysicalDeviceProperties(
7707  VmaAllocator allocator,
7708  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7709 {
7710  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7711  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7712 }
7713 
7714 void vmaGetMemoryProperties(
7715  VmaAllocator allocator,
7716  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7717 {
7718  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7719  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7720 }
7721 
7722 void vmaGetMemoryTypeProperties(
7723  VmaAllocator allocator,
7724  uint32_t memoryTypeIndex,
7725  VkMemoryPropertyFlags* pFlags)
7726 {
7727  VMA_ASSERT(allocator && pFlags);
7728  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7729  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7730 }
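A small illustrative helper built on this query, e.g. to decide whether an allocation can be mapped (the helper name is hypothetical):

bool IsHostVisible(VmaAllocator allocator, uint32_t memTypeIndex)
{
    VkMemoryPropertyFlags flags = 0;
    vmaGetMemoryTypeProperties(allocator, memTypeIndex, &flags);
    return (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
}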
7731 
7732 void vmaSetCurrentFrameIndex(
7733  VmaAllocator allocator,
7734  uint32_t frameIndex)
7735 {
7736  VMA_ASSERT(allocator);
7737  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7738 
7739  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7740 
7741  allocator->SetCurrentFrameIndex(frameIndex);
7742 }
7743 
7744 void vmaCalculateStats(
7745  VmaAllocator allocator,
7746  VmaStats* pStats)
7747 {
7748  VMA_ASSERT(allocator && pStats);
7749  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7750  allocator->CalculateStats(pStats);
7751 }
7752 
7753 #if VMA_STATS_STRING_ENABLED
7754 
7755 void vmaBuildStatsString(
7756  VmaAllocator allocator,
7757  char** ppStatsString,
7758  VkBool32 detailedMap)
7759 {
7760  VMA_ASSERT(allocator && ppStatsString);
7761  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7762 
7763  VmaStringBuilder sb(allocator);
7764  {
7765  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7766  json.BeginObject();
7767 
7768  VmaStats stats;
7769  allocator->CalculateStats(&stats);
7770 
7771  json.WriteString("Total");
7772  VmaPrintStatInfo(json, stats.total);
7773 
7774  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7775  {
7776  json.BeginString("Heap ");
7777  json.ContinueString(heapIndex);
7778  json.EndString();
7779  json.BeginObject();
7780 
7781  json.WriteString("Size");
7782  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7783 
7784  json.WriteString("Flags");
7785  json.BeginArray(true);
7786  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7787  {
7788  json.WriteString("DEVICE_LOCAL");
7789  }
7790  json.EndArray();
7791 
7792  if(stats.memoryHeap[heapIndex].blockCount > 0)
7793  {
7794  json.WriteString("Stats");
7795  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7796  }
7797 
7798  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7799  {
7800  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7801  {
7802  json.BeginString("Type ");
7803  json.ContinueString(typeIndex);
7804  json.EndString();
7805 
7806  json.BeginObject();
7807 
7808  json.WriteString("Flags");
7809  json.BeginArray(true);
7810  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7811  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7812  {
7813  json.WriteString("DEVICE_LOCAL");
7814  }
7815  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7816  {
7817  json.WriteString("HOST_VISIBLE");
7818  }
7819  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7820  {
7821  json.WriteString("HOST_COHERENT");
7822  }
7823  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7824  {
7825  json.WriteString("HOST_CACHED");
7826  }
7827  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7828  {
7829  json.WriteString("LAZILY_ALLOCATED");
7830  }
7831  json.EndArray();
7832 
7833  if(stats.memoryType[typeIndex].blockCount > 0)
7834  {
7835  json.WriteString("Stats");
7836  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7837  }
7838 
7839  json.EndObject();
7840  }
7841  }
7842 
7843  json.EndObject();
7844  }
7845  if(detailedMap == VK_TRUE)
7846  {
7847  allocator->PrintDetailedMap(json);
7848  }
7849 
7850  json.EndObject();
7851  }
7852 
7853  const size_t len = sb.GetLength();
7854  char* const pChars = vma_new_array(allocator, char, len + 1);
7855  if(len > 0)
7856  {
7857  memcpy(pChars, sb.GetData(), len);
7858  }
7859  pChars[len] = '\0';
7860  *ppStatsString = pChars;
7861 }
7862 
7863 void vmaFreeStatsString(
7864  VmaAllocator allocator,
7865  char* pStatsString)
7866 {
7867  if(pStatsString != VMA_NULL)
7868  {
7869  VMA_ASSERT(allocator);
7870  size_t len = strlen(pStatsString);
7871  vma_delete_array(allocator, pStatsString, len + 1);
7872  }
7873 }
7874 
7875 #endif // #if VMA_STATS_STRING_ENABLED
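A hedged usage sketch for the pair above (printf and the helper name are illustrative):

#include <cstdio>

void PrintStats(VmaAllocator allocator)
{
    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include the detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString); // Must be released with the matching function.
}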
7876 
7879 VkResult vmaFindMemoryTypeIndex(
7880  VmaAllocator allocator,
7881  uint32_t memoryTypeBits,
7882  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7883  uint32_t* pMemoryTypeIndex)
7884 {
7885  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7886  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7887  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7888 
7889  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7890  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7891  if(preferredFlags == 0)
7892  {
7893  preferredFlags = requiredFlags;
7894  }
7895  // preferredFlags, if not 0, must be a superset of requiredFlags.
7896  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7897 
7898  // Convert usage to requiredFlags and preferredFlags.
7899  switch(pAllocationCreateInfo->usage)
7900  {
7901  case VMA_MEMORY_USAGE_UNKNOWN:
7902  break;
7903  case VMA_MEMORY_USAGE_GPU_ONLY:
7904  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7905  break;
7906  case VMA_MEMORY_USAGE_CPU_ONLY:
7907  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7908  break;
7909  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7910  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7911  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7912  break;
7913  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7914  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7915  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7916  break;
7917  default:
7918  break;
7919  }
7920 
7921  *pMemoryTypeIndex = UINT32_MAX;
7922  uint32_t minCost = UINT32_MAX;
7923  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7924  memTypeIndex < allocator->GetMemoryTypeCount();
7925  ++memTypeIndex, memTypeBit <<= 1)
7926  {
7927  // This memory type is acceptable according to memoryTypeBits bitmask.
7928  if((memTypeBit & memoryTypeBits) != 0)
7929  {
7930  const VkMemoryPropertyFlags currFlags =
7931  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7932  // This memory type contains requiredFlags.
7933  if((requiredFlags & ~currFlags) == 0)
7934  {
7935  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7936  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7937  // Remember memory type with lowest cost.
7938  if(currCost < minCost)
7939  {
7940  *pMemoryTypeIndex = memTypeIndex;
7941  if(currCost == 0)
7942  {
7943  return VK_SUCCESS;
7944  }
7945  minCost = currCost;
7946  }
7947  }
7948  }
7949  }
7950  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7951 }
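For example (a sketch; memoryTypeBits would normally come from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements, and the helper name is hypothetical):

VkResult PickUploadMemoryType(VmaAllocator allocator, uint32_t memoryTypeBits, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    // CPU_TO_GPU: HOST_VISIBLE becomes required, DEVICE_LOCAL preferred (see the switch above).
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemTypeIndex);
}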
7952 
7953 VkResult vmaCreatePool(
7954  VmaAllocator allocator,
7955  const VmaPoolCreateInfo* pCreateInfo,
7956  VmaPool* pPool)
7957 {
7958  VMA_ASSERT(allocator && pCreateInfo && pPool);
7959 
7960  VMA_DEBUG_LOG("vmaCreatePool");
7961 
7962  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7963 
7964  return allocator->CreatePool(pCreateInfo, pPool);
7965 }
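A minimal sketch of creating a bounded custom pool (helper name and all sizes are illustrative):

VkResult CreateSmallPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex; // E.g. obtained from vmaFindMemoryTypeIndex().
    poolInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block; 0 picks the default.
    poolInfo.minBlockCount = 1; // Keep one block allocated even when empty.
    poolInfo.maxBlockCount = 4; // Hard cap: at most 4 blocks, 64 MiB total.
    return vmaCreatePool(allocator, &poolInfo, pPool);
}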
7966 
7967 void vmaDestroyPool(
7968  VmaAllocator allocator,
7969  VmaPool pool)
7970 {
7971  VMA_ASSERT(allocator);
7972 
7973  if(pool == VK_NULL_HANDLE)
7974  {
7975  return;
7976  }
7977 
7978  VMA_DEBUG_LOG("vmaDestroyPool");
7979 
7980  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7981 
7982  allocator->DestroyPool(pool);
7983 }
7984 
7985 void vmaGetPoolStats(
7986  VmaAllocator allocator,
7987  VmaPool pool,
7988  VmaPoolStats* pPoolStats)
7989 {
7990  VMA_ASSERT(allocator && pool && pPoolStats);
7991 
7992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7993 
7994  allocator->GetPoolStats(pool, pPoolStats);
7995 }
7996 
7997 void vmaMakePoolAllocationsLost(
7998  VmaAllocator allocator,
7999  VmaPool pool,
8000  size_t* pLostAllocationCount)
8001 {
8002  VMA_ASSERT(allocator && pool);
8003 
8004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8005 
8006  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8007 }
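Together these two calls drive the lost-allocation mechanism once per frame; a sketch (helper name hypothetical):

void BeginFrame(VmaAllocator allocator, VmaPool pool, uint32_t frameIndex)
{
    // frameIndex must never equal VMA_FRAME_INDEX_LOST (see the assert above).
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    size_t lostCount = 0;
    vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
    // lostCount allocations were just marked lost; their space can be reused.
}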
8008 
8009 VkResult vmaAllocateMemory(
8010  VmaAllocator allocator,
8011  const VkMemoryRequirements* pVkMemoryRequirements,
8012  const VmaAllocationCreateInfo* pCreateInfo,
8013  VmaAllocation* pAllocation,
8014  VmaAllocationInfo* pAllocationInfo)
8015 {
8016  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8017 
8018  VMA_DEBUG_LOG("vmaAllocateMemory");
8019 
8020  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8021 
8022  VkResult result = allocator->AllocateMemory(
8023  *pVkMemoryRequirements,
8024  false, // requiresDedicatedAllocation
8025  false, // prefersDedicatedAllocation
8026  VK_NULL_HANDLE, // dedicatedBuffer
8027  VK_NULL_HANDLE, // dedicatedImage
8028  *pCreateInfo,
8029  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8030  pAllocation);
8031 
8032  if(pAllocationInfo && result == VK_SUCCESS)
8033  {
8034  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8035  }
8036 
8037  return result;
8038 }
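When allocating for an existing buffer this way, binding stays the caller's responsibility; a sketch under that assumption (helper name hypothetical; vmaAllocateMemoryForBuffer below is usually preferable because it also forwards dedicated-allocation hints):

VkResult AllocateAndBindBufferMemory(VmaAllocator allocator, VkDevice device, VkBuffer buffer)
{
    VkMemoryRequirements memReq = {};
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
    if(res != VK_SUCCESS)
        return res;

    // Bind at the exact (deviceMemory, offset) pair the allocator returned.
    return vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);
}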
8039 
8040 VkResult vmaAllocateMemoryForBuffer(
8041  VmaAllocator allocator,
8042  VkBuffer buffer,
8043  const VmaAllocationCreateInfo* pCreateInfo,
8044  VmaAllocation* pAllocation,
8045  VmaAllocationInfo* pAllocationInfo)
8046 {
8047  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8048 
8049  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8050 
8051  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8052 
8053  VkMemoryRequirements vkMemReq = {};
8054  bool requiresDedicatedAllocation = false;
8055  bool prefersDedicatedAllocation = false;
8056  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8057  requiresDedicatedAllocation,
8058  prefersDedicatedAllocation);
8059 
8060  VkResult result = allocator->AllocateMemory(
8061  vkMemReq,
8062  requiresDedicatedAllocation,
8063  prefersDedicatedAllocation,
8064  buffer, // dedicatedBuffer
8065  VK_NULL_HANDLE, // dedicatedImage
8066  *pCreateInfo,
8067  VMA_SUBALLOCATION_TYPE_BUFFER,
8068  pAllocation);
8069 
8070  if(pAllocationInfo && result == VK_SUCCESS)
8071  {
8072  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8073  }
8074 
8075  return result;
8076 }
8077 
8078 VkResult vmaAllocateMemoryForImage(
8079  VmaAllocator allocator,
8080  VkImage image,
8081  const VmaAllocationCreateInfo* pCreateInfo,
8082  VmaAllocation* pAllocation,
8083  VmaAllocationInfo* pAllocationInfo)
8084 {
8085  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8086 
8087  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8088 
8089  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8090 
8091  VkResult result = AllocateMemoryForImage(
8092  allocator,
8093  image,
8094  pCreateInfo,
8095  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8096  pAllocation);
8097 
8098  if(pAllocationInfo && result == VK_SUCCESS)
8099  {
8100  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8101  }
8102 
8103  return result;
8104 }
8105 
8106 void vmaFreeMemory(
8107  VmaAllocator allocator,
8108  VmaAllocation allocation)
8109 {
8110  VMA_ASSERT(allocator && allocation);
8111 
8112  VMA_DEBUG_LOG("vmaFreeMemory");
8113 
8114  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8115 
8116  allocator->FreeMemory(allocation);
8117 }
8118 
8119 void vmaGetAllocationInfo(
8120  VmaAllocator allocator,
8121  VmaAllocation allocation,
8122  VmaAllocationInfo* pAllocationInfo)
8123 {
8124  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8125 
8126  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8127 
8128  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8129 }
8130 
8131 void vmaSetAllocationUserData(
8132  VmaAllocator allocator,
8133  VmaAllocation allocation,
8134  void* pUserData)
8135 {
8136  VMA_ASSERT(allocator && allocation);
8137 
8138  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8139 
8140  allocation->SetUserData(allocator, pUserData);
8141 }
8142 
8143 void vmaCreateLostAllocation(
8144  VmaAllocator allocator,
8145  VmaAllocation* pAllocation)
8146 {
8147  VMA_ASSERT(allocator && pAllocation);
8148 
8149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8150 
8151  allocator->CreateLostAllocation(pAllocation);
8152 }
8153 
8154 VkResult vmaMapMemory(
8155  VmaAllocator allocator,
8156  VmaAllocation allocation,
8157  void** ppData)
8158 {
8159  VMA_ASSERT(allocator && allocation && ppData);
8160 
8161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8162 
8163  return allocator->Map(allocation, ppData);
8164 }
8165 
8166 void vmaUnmapMemory(
8167  VmaAllocator allocator,
8168  VmaAllocation allocation)
8169 {
8170  VMA_ASSERT(allocator && allocation);
8171 
8172  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8173 
8174  allocator->Unmap(allocation);
8175 }
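A typical map/copy/unmap round trip (sketch; helper name hypothetical):

#include <cstring>

VkResult UploadToAllocation(VmaAllocator allocator, VmaAllocation allocation,
                            const void* pSrc, size_t size)
{
    void* pData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res != VK_SUCCESS)
        return res; // Also fails for allocations that can become lost (see Map above).
    memcpy(pData, pSrc, size);
    vmaUnmapMemory(allocator, allocation); // Mapping is reference-counted per memory block.
    return VK_SUCCESS;
}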
8176 
8177 VkResult vmaDefragment(
8178  VmaAllocator allocator,
8179  VmaAllocation* pAllocations,
8180  size_t allocationCount,
8181  VkBool32* pAllocationsChanged,
8182  const VmaDefragmentationInfo *pDefragmentationInfo,
8183  VmaDefragmentationStats* pDefragmentationStats)
8184 {
8185  VMA_ASSERT(allocator && pAllocations);
8186 
8187  VMA_DEBUG_LOG("vmaDefragment");
8188 
8189  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8190 
8191  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8192 }
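A sketch of invoking defragmentation with no limits (names illustrative). Per the implementation above, only block (non-dedicated), HOST_VISIBLE, non-lost allocations are considered:

#include <vector>

VkResult DefragmentAll(VmaAllocator allocator, VmaAllocation* pAllocations, size_t count)
{
    std::vector<VkBool32> changed(count, VK_FALSE);
    VmaDefragmentationStats stats = {};
    // A null VmaDefragmentationInfo means no byte or move-count budget.
    VkResult res = vmaDefragment(allocator, pAllocations, count, changed.data(), nullptr, &stats);
    // Where changed[i] == VK_TRUE the allocation moved: recreate or rebind the
    // buffer/image that used it at the new deviceMemory/offset.
    return res;
}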
8193 
8194 VkResult vmaCreateBuffer(
8195  VmaAllocator allocator,
8196  const VkBufferCreateInfo* pBufferCreateInfo,
8197  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8198  VkBuffer* pBuffer,
8199  VmaAllocation* pAllocation,
8200  VmaAllocationInfo* pAllocationInfo)
8201 {
8202  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8203 
8204  VMA_DEBUG_LOG("vmaCreateBuffer");
8205 
8206  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8207 
8208  *pBuffer = VK_NULL_HANDLE;
8209  *pAllocation = VK_NULL_HANDLE;
8210 
8211  // 1. Create VkBuffer.
8212  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8213  allocator->m_hDevice,
8214  pBufferCreateInfo,
8215  allocator->GetAllocationCallbacks(),
8216  pBuffer);
8217  if(res >= 0)
8218  {
8219  // 2. vkGetBufferMemoryRequirements.
8220  VkMemoryRequirements vkMemReq = {};
8221  bool requiresDedicatedAllocation = false;
8222  bool prefersDedicatedAllocation = false;
8223  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8224  requiresDedicatedAllocation, prefersDedicatedAllocation);
8225 
8226  // 3. Allocate memory using allocator.
8227  res = allocator->AllocateMemory(
8228  vkMemReq,
8229  requiresDedicatedAllocation,
8230  prefersDedicatedAllocation,
8231  *pBuffer, // dedicatedBuffer
8232  VK_NULL_HANDLE, // dedicatedImage
8233  *pAllocationCreateInfo,
8234  VMA_SUBALLOCATION_TYPE_BUFFER,
8235  pAllocation);
8236  if(res >= 0)
8237  {
8238  // 4. Bind buffer with memory.
8239  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8240  allocator->m_hDevice,
8241  *pBuffer,
8242  (*pAllocation)->GetMemory(),
8243  (*pAllocation)->GetOffset());
8244  if(res >= 0)
8245  {
8246  // All steps succeeded.
8247  if(pAllocationInfo != VMA_NULL)
8248  {
8249  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8250  }
8251  return VK_SUCCESS;
8252  }
8253  allocator->FreeMemory(*pAllocation);
8254  *pAllocation = VK_NULL_HANDLE;
8255  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8256  *pBuffer = VK_NULL_HANDLE;
8257  return res;
8258  }
8259  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8260  *pBuffer = VK_NULL_HANDLE;
8261  return res;
8262  }
8263  return res;
8264 }
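The common one-call path: create the buffer, allocate, and bind together (a sketch; the helper name, size, and usage flags are illustrative):

VkResult CreateStagingBuffer(VmaAllocator allocator, VkDeviceSize size,
                             VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = size;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // HOST_VISIBLE | HOST_COHERENT required.

    // On failure both *pBuffer and *pAllocation are left as VK_NULL_HANDLE.
    return vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, pBuffer, pAllocation, nullptr);
}

Pair it with vmaDestroyBuffer() below, which frees both the VkBuffer and its allocation.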
8265 
8266 void vmaDestroyBuffer(
8267  VmaAllocator allocator,
8268  VkBuffer buffer,
8269  VmaAllocation allocation)
8270 {
8271  if(buffer != VK_NULL_HANDLE)
8272  {
8273  VMA_ASSERT(allocator);
8274 
8275  VMA_DEBUG_LOG("vmaDestroyBuffer");
8276 
8277  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8278 
8279  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8280 
8281  allocator->FreeMemory(allocation);
8282  }
8283 }
8284 
8285 VkResult vmaCreateImage(
8286  VmaAllocator allocator,
8287  const VkImageCreateInfo* pImageCreateInfo,
8288  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8289  VkImage* pImage,
8290  VmaAllocation* pAllocation,
8291  VmaAllocationInfo* pAllocationInfo)
8292 {
8293  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8294 
8295  VMA_DEBUG_LOG("vmaCreateImage");
8296 
8297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8298 
8299  *pImage = VK_NULL_HANDLE;
8300  *pAllocation = VK_NULL_HANDLE;
8301 
8302  // 1. Create VkImage.
8303  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8304  allocator->m_hDevice,
8305  pImageCreateInfo,
8306  allocator->GetAllocationCallbacks(),
8307  pImage);
8308  if(res >= 0)
8309  {
8310  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8311  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8312  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8313 
8314  // 2. Allocate memory using allocator.
8315  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8316  if(res >= 0)
8317  {
8318  // 3. Bind image with memory.
8319  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8320  allocator->m_hDevice,
8321  *pImage,
8322  (*pAllocation)->GetMemory(),
8323  (*pAllocation)->GetOffset());
8324  if(res >= 0)
8325  {
8326  // All steps succeeded.
8327  if(pAllocationInfo != VMA_NULL)
8328  {
8329  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8330  }
8331  return VK_SUCCESS;
8332  }
8333  allocator->FreeMemory(*pAllocation);
8334  *pAllocation = VK_NULL_HANDLE;
8335  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8336  *pImage = VK_NULL_HANDLE;
8337  return res;
8338  }
8339  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8340  *pImage = VK_NULL_HANDLE;
8341  return res;
8342  }
8343  return res;
8344 }
8345 
8346 void vmaDestroyImage(
8347  VmaAllocator allocator,
8348  VkImage image,
8349  VmaAllocation allocation)
8350 {
8351  if(image != VK_NULL_HANDLE)
8352  {
8353  VMA_ASSERT(allocator);
8354 
8355  VMA_DEBUG_LOG("vmaDestroyImage");
8356 
8357  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8358 
8359  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8360 
8361  allocator->FreeMemory(allocation);
8362  }
8363 }
8364 
8365 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:670
+
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:893
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:695
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:680
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
-
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:861
+
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:867
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:674
-
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1142
+
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1148
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:692
-
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1308
-
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1012
+
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1314
+
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1018
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
-
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1066
-
Definition: vk_mem_alloc.h:924
+
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1072
+
Definition: vk_mem_alloc.h:930
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:663
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:963
-
Definition: vk_mem_alloc.h:871
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:969
+
Definition: vk_mem_alloc.h:877
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:707
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
-
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:754
+
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:760
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:689
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:704
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
-
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:875
+
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:881
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
-
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:819
+
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:825
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:677
-
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:818
+
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:824
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:685
-
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1312
+
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1318
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:724
-
VmaStatInfo total
Definition: vk_mem_alloc.h:828
-
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1320
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:946
-
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1303
+
VmaStatInfo total
Definition: vk_mem_alloc.h:834
+
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1326
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:952
+
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1309
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:678
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:599
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:698
-
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1020
-
Definition: vk_mem_alloc.h:1014
-
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1152
+
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1026
+
Definition: vk_mem_alloc.h:1020
+
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1158
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:675
-
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:965
-
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1036
-
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1072
+
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:971
+
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1042
+
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1078
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:661
-
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1023
+
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1029
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
-
VmaMemoryUsage
Definition: vk_mem_alloc.h:856
+
VmaMemoryUsage
Definition: vk_mem_alloc.h:862
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
-
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1298
+
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1304
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1316
-
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:867
+
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1322
+
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:873
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:676
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
-
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:824
+
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:830
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:605
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:626
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:631
-
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1318
+
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1324
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:957
-
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1082
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:963
+
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1088
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:671
-
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:807
-
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1031
+
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:813
+
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1037
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:618
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
Definition: vk_mem_alloc.h:931
-
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:820
+
Definition: vk_mem_alloc.h:937
+
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:826
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:622
-
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1026
-
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:870
+
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1032
+
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:876
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:952
-
Definition: vk_mem_alloc.h:943
-
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:810
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:958
+
Definition: vk_mem_alloc.h:949
+
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:816
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:673
-
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1044
+
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1050
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:710
-
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1075
-
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:941
-
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:970
+
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1081
+
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:947
+
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:976
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
-
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:742
-
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:826
-
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:911
-
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:819
+
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:748
+
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:832
+
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:917
+
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:825
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:682
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:620
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:681
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
-
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1058
+
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1064
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
-
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1166
+
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1172
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
Definition: vk_mem_alloc.h:701
-
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:819
-
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:816
+
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:825
+
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:822
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
-
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1063
-
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1147
-
Definition: vk_mem_alloc.h:939
-
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1314
+
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1069
+
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1153
+
Definition: vk_mem_alloc.h:945
+
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1320
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:669
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates an Allocator object.
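The minimal create/destroy pairing, assuming valid physicalDevice and device handles:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);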
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:684
-
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:814
-
No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requ...
Definition: vk_mem_alloc.h:859
-
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1016
+
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:820
+
No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requ...
Definition: vk_mem_alloc.h:865
+
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1022
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:812
+
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:818
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:679
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:683
-
Set this flag to allocate only from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:898
-
Memory will be mapped on the host. Can be used for transfers to/from the device.
Definition: vk_mem_alloc.h:864
-
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1161
+
Set this flag to allocate only from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:904
+
Memory will be mapped on the host. Can be used for transfers to/from the device.
Definition: vk_mem_alloc.h:870
+
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1167
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of the VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:659
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
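A hedged sketch of the call that fills this structure, assuming the vmaDefragment() signature in this revision (array of allocations, per-allocation change flags, optional VmaDefragmentationInfo, output stats); allocations is a placeholder std::vector<VmaAllocation>:

    std::vector<VkBool32> changed(allocations.size());
    VmaDefragmentationStats stats = {};
    vmaDefragment(allocator, allocations.data(), allocations.size(),
        changed.data(), nullptr, &stats);
    printf("Moved %llu bytes\n", (unsigned long long)stats.bytesMoved);
    // Buffers or images bound to allocations flagged in `changed` must be
    // recreated and rebound by the application.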
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:672
-
Parameters of a VmaAllocation object that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1128
+
Parameters of a VmaAllocation object that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1134
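Retrieving these parameters for a live allocation:

    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, allocation, &info);
    // info.memoryType, info.deviceMemory, info.offset, info.size,
    // info.pMappedData and info.pUserData mirror the members documented here.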
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
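The general-purpose path, fed by memory requirements the application queries itself; buffer here is a placeholder handle:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, nullptr);
    // ... bind and use the memory ...
    vmaFreeMemory(allocator, allocation);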
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets the index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:994
-
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:820
- -
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:827
+
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1000
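A sketch of creating a custom pool with these flags left at their default of zero; memoryTypeIndex is a placeholder the application picks first (for example with vmaFindMemoryTypeIndex()):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memoryTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024;  // 64 MiB per VkDeviceMemory block
    poolInfo.minBlockCount = 1;
    poolInfo.maxBlockCount = 4;

    VmaPool pool;
    vmaCreatePool(allocator, &poolInfo, &pool);

    // Route allocations into the pool:
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;

    // Inspect usage, including the unusedSize member documented below:
    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);

    vmaDestroyPool(allocator, pool);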
+
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:826
+ +
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:833
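Both fixed-size arrays are filled by vmaCalculateStats(); entries for heaps the device does not expose simply report zero allocations:

    VmaStats stats = {};
    vmaCalculateStats(allocator, &stats);
    for(uint32_t h = 0; h < VK_MAX_MEMORY_HEAPS; ++h)
    {
        if(stats.memoryHeap[h].allocationCount > 0)
            printf("Heap %u: %llu bytes in use\n", h,
                (unsigned long long)stats.memoryHeap[h].usedBytes);
    }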
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
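This is the counterpart of vmaCreateBuffer(), which creates the buffer and its backing allocation in one call; bufferCreateInfo is a filled VkBufferCreateInfo:

    VkBuffer buffer;
    VmaAllocation allocation;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    vmaCreateBuffer(allocator, &bufferCreateInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);

    // ... use the buffer ...

    vmaDestroyBuffer(allocator, buffer, allocation);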
-
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1069
-
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:820
-
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1133
+
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1075
+
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:826
+
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1139