#pragma once
// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include <vulkan/vulkan.hpp>

#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>

#include <iostream>
#include <limits>
#include <map>
#include <memory>  // std::unique_ptr

namespace vk
{
  namespace su
  {
    const uint64_t FenceTimeout = 100000000;

    template <typename Func>
    void oneTimeSubmit( vk::CommandBuffer const & commandBuffer, vk::Queue const & queue, Func const & func )
    {
      commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) );
      func( commandBuffer );
      commandBuffer.end();
      queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &commandBuffer ), nullptr );
      queue.waitIdle();
    }

    template <typename Func>
    void oneTimeSubmit( vk::Device const & device, vk::CommandPool const & commandPool, vk::Queue const & queue, Func const & func )
    {
      vk::CommandBuffer commandBuffer =
        device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ).front();
      oneTimeSubmit( commandBuffer, queue, func );
    }

    template <typename T>
    void copyToDevice( vk::Device const &       device,
                       vk::DeviceMemory const & deviceMemory,
                       T const *                pData,
                       size_t                   count,
                       vk::DeviceSize           stride = sizeof( T ) )
    {
      assert( sizeof( T ) <= stride );
      uint8_t * deviceData = static_cast<uint8_t *>( device.mapMemory( deviceMemory, 0, count * stride ) );
      if ( stride == sizeof( T ) )
      {
        // tightly packed data can be copied in one go
        memcpy( deviceData, pData, count * sizeof( T ) );
      }
      else
      {
        // otherwise copy element by element, advancing by the requested stride
        for ( size_t i = 0; i < count; i++ )
        {
          memcpy( deviceData, &pData[i], sizeof( T ) );
          deviceData += stride;
        }
      }
      device.unmapMemory( deviceMemory );
    }

    template <typename T>
    void copyToDevice( vk::Device const & device, vk::DeviceMemory const & deviceMemory, T const & data )
    {
      copyToDevice( device, deviceMemory, &data, 1 );
    }

    template <typename T>
    VULKAN_HPP_INLINE constexpr const T & clamp( const T & v, const T & lo, const T & hi )
    {
      return v < lo ? lo : hi < v ? hi : v;
    }
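    // Illustrative usage sketch (not part of this header's API): filling a host-visible, host-coherent
    // allocation with copyToDevice, then recording and submitting a one-shot command buffer with
    // oneTimeSubmit. The device, commandPool, queue, deviceMemory and someBuffer handles are assumed
    // to exist and be valid; someBuffer is a placeholder name.
    //
    //   std::vector<float> values = { 0.0f, 1.0f, 2.0f };
    //   vk::su::copyToDevice( device, deviceMemory, values.data(), values.size() );
    //   vk::su::oneTimeSubmit( device, commandPool, queue,
    //                          [&]( vk::CommandBuffer const & commandBuffer )
    //                          { commandBuffer.fillBuffer( someBuffer, 0, VK_WHOLE_SIZE, 0 ); } );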
    void setImageLayout( vk::CommandBuffer const & commandBuffer,
                         vk::Image                 image,
                         vk::Format                format,
                         vk::ImageLayout           oldImageLayout,
                         vk::ImageLayout           newImageLayout );

    struct WindowData
    {
      WindowData( GLFWwindow * wnd, std::string const & name, vk::Extent2D const & extent );
      WindowData( const WindowData & ) = delete;
      WindowData( WindowData && other );
      ~WindowData() noexcept;

      GLFWwindow * handle;
      std::string  name;
      vk::Extent2D extent;
    };

    WindowData createWindow( std::string const & windowName, vk::Extent2D const & extent );

    struct BufferData
    {
      BufferData( vk::PhysicalDevice const & physicalDevice,
                  vk::Device const &         device,
                  vk::DeviceSize             size,
                  vk::BufferUsageFlags       usage,
                  vk::MemoryPropertyFlags    propertyFlags = vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent );

      void clear( vk::Device const & device )
      {
        device.freeMemory( deviceMemory );
        device.destroyBuffer( buffer );
      }

      template <typename DataType>
      void upload( vk::Device const & device, DataType const & data ) const
      {
        assert( ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostCoherent ) &&
                ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ) );
        assert( sizeof( DataType ) <= m_size );

        void * dataPtr = device.mapMemory( deviceMemory, 0, sizeof( DataType ) );
        memcpy( dataPtr, &data, sizeof( DataType ) );
        device.unmapMemory( deviceMemory );
      }

      template <typename DataType>
      void upload( vk::Device const & device, std::vector<DataType> const & data, size_t stride = 0 ) const
      {
        assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible );

        size_t elementSize = stride ? stride : sizeof( DataType );
        assert( sizeof( DataType ) <= elementSize );

        copyToDevice( device, deviceMemory, data.data(), data.size(), elementSize );
      }

      template <typename DataType>
      void upload( vk::PhysicalDevice const &    physicalDevice,
                   vk::Device const &            device,
                   vk::CommandPool const &       commandPool,
                   vk::Queue                     queue,
                   std::vector<DataType> const & data,
                   size_t                        stride ) const
      {
        assert( m_usage & vk::BufferUsageFlagBits::eTransferDst );
        assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eDeviceLocal );

        size_t elementSize = stride ? stride : sizeof( DataType );
        assert( sizeof( DataType ) <= elementSize );

        size_t dataSize = data.size() * elementSize;
        assert( dataSize <= m_size );

        // copy into a host-visible staging buffer, then copy from there into the device-local buffer
        vk::su::BufferData stagingBuffer( physicalDevice, device, dataSize, vk::BufferUsageFlagBits::eTransferSrc );
        copyToDevice( device, stagingBuffer.deviceMemory, data.data(), data.size(), elementSize );

        vk::su::oneTimeSubmit( device,
                               commandPool,
                               queue,
                               [&]( vk::CommandBuffer const & commandBuffer )
                               { commandBuffer.copyBuffer( stagingBuffer.buffer, buffer, vk::BufferCopy( 0, 0, dataSize ) ); } );

        stagingBuffer.clear( device );
      }

      vk::Buffer       buffer;
      vk::DeviceMemory deviceMemory;
#if !defined( NDEBUG )
    private:
      vk::DeviceSize          m_size;
      vk::BufferUsageFlags    m_usage;
      vk::MemoryPropertyFlags m_propertyFlags;
#endif
    };
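    // Illustrative usage sketch (not part of this header's API): creating a host-visible vertex buffer
    // with the default memory property flags and uploading a vector of vertices. The Vertex type here is
    // just an example; physicalDevice and device are assumed to be valid handles.
    //
    //   struct Vertex { float x, y, z, w; };
    //   std::vector<Vertex> vertices = { { 0.0f, 0.0f, 0.0f, 1.0f }, { 1.0f, 0.0f, 0.0f, 1.0f }, { 0.0f, 1.0f, 0.0f, 1.0f } };
    //   vk::su::BufferData vertexBufferData( physicalDevice, device, vertices.size() * sizeof( Vertex ), vk::BufferUsageFlagBits::eVertexBuffer );
    //   vertexBufferData.upload( device, vertices );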
    struct ImageData
    {
      ImageData( vk::PhysicalDevice const & physicalDevice,
                 vk::Device const &         device,
                 vk::Format                 format,
                 vk::Extent2D const &       extent,
                 vk::ImageTiling            tiling,
                 vk::ImageUsageFlags        usage,
                 vk::ImageLayout            initialLayout,
                 vk::MemoryPropertyFlags    memoryProperties,
                 vk::ImageAspectFlags       aspectMask );

      void clear( vk::Device const & device )
      {
        device.destroyImageView( imageView );
        device.freeMemory( deviceMemory );
        device.destroyImage( image );
      }

      vk::Format       format;
      vk::Image        image;
      vk::DeviceMemory deviceMemory;
      vk::ImageView    imageView;
    };

    struct DepthBufferData : public ImageData
    {
      DepthBufferData( vk::PhysicalDevice const & physicalDevice, vk::Device const & device, vk::Format format, vk::Extent2D const & extent );
    };

    struct SurfaceData
    {
      SurfaceData( vk::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent );

      vk::Extent2D   extent;
      WindowData     window;
      vk::SurfaceKHR surface;
    };

    struct SwapChainData
    {
      SwapChainData( vk::PhysicalDevice const & physicalDevice,
                     vk::Device const &         device,
                     vk::SurfaceKHR const &     surface,
                     vk::Extent2D const &       extent,
                     vk::ImageUsageFlags        usage,
                     vk::SwapchainKHR const &   oldSwapChain,
                     uint32_t                   graphicsFamilyIndex,
                     uint32_t                   presentFamilyIndex );

      void clear( vk::Device const & device )
      {
        for ( auto & imageView : imageViews )
        {
          device.destroyImageView( imageView );
        }
        imageViews.clear();
        images.clear();
        device.destroySwapchainKHR( swapChain );
      }

      vk::Format                 colorFormat;
      vk::SwapchainKHR           swapChain;
      std::vector<vk::Image>     images;
      std::vector<vk::ImageView> imageViews;
    };
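    // Illustrative usage sketch (not part of this header's API): a typical swapchain setup with the
    // structs above. The instance, physicalDevice and device handles are assumed to exist already;
    // findGraphicsAndPresentQueueFamilyIndex is declared further down in this header.
    //
    //   vk::su::SurfaceData surfaceData( instance, "MySample", vk::Extent2D( 640, 480 ) );
    //   std::pair<uint32_t, uint32_t> graphicsAndPresentQueueFamilyIndex =
    //     vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface );
    //   vk::su::SwapChainData swapChainData( physicalDevice,
    //                                        device,
    //                                        surfaceData.surface,
    //                                        surfaceData.extent,
    //                                        vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc,
    //                                        {},
    //                                        graphicsAndPresentQueueFamilyIndex.first,
    //                                        graphicsAndPresentQueueFamilyIndex.second );
    //   vk::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent );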
    class CheckerboardImageGenerator
    {
    public:
      CheckerboardImageGenerator( std::array<uint8_t, 3> const & rgb0 = { { 0, 0, 0 } }, std::array<uint8_t, 3> const & rgb1 = { { 255, 255, 255 } } );

      void operator()( void * data, vk::Extent2D & extent ) const;

    private:
      std::array<uint8_t, 3> const & m_rgb0;
      std::array<uint8_t, 3> const & m_rgb1;
    };

    class MonochromeImageGenerator
    {
    public:
      MonochromeImageGenerator( std::array<uint8_t, 3> const & rgb );

      void operator()( void * data, vk::Extent2D const & extent ) const;

    private:
      std::array<uint8_t, 3> const & m_rgb;
    };

    class PixelsImageGenerator
    {
    public:
      PixelsImageGenerator( vk::Extent2D const & extent, size_t channels, unsigned char const * pixels );

      void operator()( void * data, vk::Extent2D const & extent ) const;

    private:
      vk::Extent2D          m_extent;
      size_t                m_channels;
      unsigned char const * m_pixels;
    };

    struct TextureData
    {
      TextureData( vk::PhysicalDevice const & physicalDevice,
                   vk::Device const &         device,
                   vk::Extent2D const &       extent_            = { 256, 256 },
                   vk::ImageUsageFlags        usageFlags         = {},
                   vk::FormatFeatureFlags     formatFeatureFlags = {},
                   bool                       anisotropyEnable   = false,
                   bool                       forceStaging       = false );

      void clear( vk::Device const & device )
      {
        if ( stagingBufferData )
        {
          stagingBufferData->clear( device );
        }
        imageData->clear( device );
        device.destroySampler( sampler );
      }

      template <typename ImageGenerator>
      void setImage( vk::Device const & device, vk::CommandBuffer const & commandBuffer, ImageGenerator const & imageGenerator )
      {
        // map either the staging buffer or the linear tiled image, let the generator fill it, and unmap again
        void * data = needsStaging
                        ? device.mapMemory( stagingBufferData->deviceMemory, 0, device.getBufferMemoryRequirements( stagingBufferData->buffer ).size )
                        : device.mapMemory( imageData->deviceMemory, 0, device.getImageMemoryRequirements( imageData->image ).size );
        imageGenerator( data, extent );
        device.unmapMemory( needsStaging ? stagingBufferData->deviceMemory : imageData->deviceMemory );

        if ( needsStaging )
        {
          // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal
          vk::su::setImageLayout( commandBuffer, imageData->image, imageData->format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal );
          vk::BufferImageCopy copyRegion( 0,
                                          extent.width,
                                          extent.height,
                                          vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ),
                                          vk::Offset3D( 0, 0, 0 ),
                                          vk::Extent3D( extent, 1 ) );
          commandBuffer.copyBufferToImage( stagingBufferData->buffer, imageData->image, vk::ImageLayout::eTransferDstOptimal, copyRegion );
          // Set the layout for the texture image from eTransferDstOptimal to eShaderReadOnlyOptimal
          vk::su::setImageLayout(
            commandBuffer, imageData->image, imageData->format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal );
        }
        else
        {
          // If we can use the linear tiled image as a texture, just do it
          vk::su::setImageLayout(
            commandBuffer, imageData->image, imageData->format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal );
        }
      }

      vk::Format                  format;
      vk::Extent2D                extent;
      bool                        needsStaging;
      std::unique_ptr<BufferData> stagingBufferData;
      std::unique_ptr<ImageData>  imageData;
      vk::Sampler                 sampler;
    };

    struct UUID
    {
    public:
      UUID( uint8_t const data[VK_UUID_SIZE] );

      uint8_t m_data[VK_UUID_SIZE];
    };

    template <typename TargetType, typename SourceType>
    VULKAN_HPP_INLINE TargetType checked_cast( SourceType value )
    {
      static_assert( sizeof( TargetType ) <= sizeof( SourceType ), "No need to cast from smaller to larger type!" );
      static_assert( std::numeric_limits<SourceType>::is_integer, "Only integer types supported!" );
      static_assert( !std::numeric_limits<SourceType>::is_signed, "Only unsigned types supported!" );
      static_assert( std::numeric_limits<TargetType>::is_integer, "Only integer types supported!" );
      static_assert( !std::numeric_limits<TargetType>::is_signed, "Only unsigned types supported!" );
      assert( value <= std::numeric_limits<TargetType>::max() );
      return static_cast<TargetType>( value );
    }
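    // Illustrative usage sketch (not part of this header's API): creating a default 256x256 texture and
    // filling it with the checkerboard generator inside a one-shot command buffer. The physicalDevice,
    // device, commandPool and graphicsQueue handles are assumed to be valid.
    //
    //   vk::su::TextureData textureData( physicalDevice, device );
    //   vk::su::oneTimeSubmit( device, commandPool, graphicsQueue,
    //                          [&]( vk::CommandBuffer const & commandBuffer )
    //                          { textureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() ); } );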
    vk::DeviceMemory allocateDeviceMemory( vk::Device const &                         device,
                                           vk::PhysicalDeviceMemoryProperties const & memoryProperties,
                                           vk::MemoryRequirements const &             memoryRequirements,
                                           vk::MemoryPropertyFlags                    memoryPropertyFlags );
    bool contains( std::vector<vk::ExtensionProperties> const & extensionProperties, std::string const & extensionName );
    vk::CommandPool createCommandPool( vk::Device const & device, uint32_t queueFamilyIndex );
    vk::DescriptorPool createDescriptorPool( vk::Device const & device, std::vector<vk::DescriptorPoolSize> const & poolSizes );
    vk::DescriptorSetLayout createDescriptorSetLayout( vk::Device const &                                                            device,
                                                       std::vector<std::tuple<vk::DescriptorType, uint32_t, vk::ShaderStageFlags>> const & bindingData,
                                                       vk::DescriptorSetLayoutCreateFlags                                            flags = {} );
    vk::Device createDevice( vk::PhysicalDevice const &         physicalDevice,
                             uint32_t                           queueFamilyIndex,
                             std::vector<std::string> const &   extensions             = {},
                             vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr,
                             void const *                       pNext                  = nullptr );
    std::vector<vk::Framebuffer> createFramebuffers( vk::Device const &                 device,
                                                     vk::RenderPass &                   renderPass,
                                                     std::vector<vk::ImageView> const & imageViews,
                                                     vk::ImageView const &              depthImageView,
                                                     vk::Extent2D const &               extent );
    vk::Pipeline createGraphicsPipeline( vk::Device const &                                                  device,
                                         vk::PipelineCache const &                                           pipelineCache,
                                         std::pair<vk::ShaderModule, vk::SpecializationInfo const *> const & vertexShaderData,
                                         std::pair<vk::ShaderModule, vk::SpecializationInfo const *> const & fragmentShaderData,
                                         uint32_t                                                            vertexStride,
                                         std::vector<std::pair<vk::Format, uint32_t>> const &                vertexInputAttributeFormatOffset,
                                         vk::FrontFace                                                       frontFace,
                                         bool                                                                depthBuffered,
                                         vk::PipelineLayout const &                                          pipelineLayout,
                                         vk::RenderPass const &                                              renderPass );
    vk::Instance createInstance( std::string const &              appName,
                                 std::string const &              engineName,
                                 std::vector<std::string> const & layers     = {},
                                 std::vector<std::string> const & extensions = {},
                                 uint32_t                         apiVersion = VK_API_VERSION_1_0 );
    vk::RenderPass createRenderPass( vk::Device const &    device,
                                     vk::Format            colorFormat,
                                     vk::Format            depthFormat,
                                     vk::AttachmentLoadOp  loadOp           = vk::AttachmentLoadOp::eClear,
                                     vk::ImageLayout       colorFinalLayout = vk::ImageLayout::ePresentSrcKHR );
    VKAPI_ATTR VkBool32 VKAPI_CALL debugUtilsMessengerCallback( VkDebugUtilsMessageSeverityFlagBitsEXT       messageSeverity,
                                                                VkDebugUtilsMessageTypeFlagsEXT              messageTypes,
                                                                VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData,
                                                                void * /*pUserData*/ );
    uint32_t findGraphicsQueueFamilyIndex( std::vector<vk::QueueFamilyProperties> const & queueFamilyProperties );
    std::pair<uint32_t, uint32_t> findGraphicsAndPresentQueueFamilyIndex( vk::PhysicalDevice physicalDevice, vk::SurfaceKHR const & surface );
    uint32_t findMemoryType( vk::PhysicalDeviceMemoryProperties const & memoryProperties, uint32_t typeBits, vk::MemoryPropertyFlags requirementsMask );
    std::vector<char const *> gatherExtensions( std::vector<std::string> const & extensions
#if !defined( NDEBUG )
                                                ,
                                                std::vector<vk::ExtensionProperties> const & extensionProperties
#endif
    );
    std::vector<char const *> gatherLayers( std::vector<std::string> const & layers
#if !defined( NDEBUG )
                                            ,
                                            std::vector<vk::LayerProperties> const & layerProperties
#endif
    );
    std::vector<std::string> getDeviceExtensions();
    std::vector<std::string> getInstanceExtensions();
    vk::DebugUtilsMessengerCreateInfoEXT makeDebugUtilsMessengerCreateInfoEXT();
#if defined( NDEBUG )
    vk::StructureChain<vk::InstanceCreateInfo>
#else
    vk::StructureChain<vk::InstanceCreateInfo, vk::DebugUtilsMessengerCreateInfoEXT>
#endif
      makeInstanceCreateInfoChain( vk::ApplicationInfo const &       applicationInfo,
                                   std::vector<char const *> const & layers,
                                   std::vector<char const *> const & extensions );
    vk::Format pickDepthFormat( vk::PhysicalDevice const & physicalDevice );
    vk::PresentModeKHR pickPresentMode( std::vector<vk::PresentModeKHR> const & presentModes );
    vk::SurfaceFormatKHR pickSurfaceFormat( std::vector<vk::SurfaceFormatKHR> const & formats );
    void submitAndWait( vk::Device const & device, vk::Queue const & queue, vk::CommandBuffer const & commandBuffer );
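    // Illustrative usage sketch (not part of this header's API): a minimal instance and device bring-up
    // with the declarations above, assuming the first enumerated physical device is acceptable.
    //
    //   vk::Instance instance = vk::su::createInstance( "MySample", "MyEngine", {}, vk::su::getInstanceExtensions() );
    //   vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front();
    //   uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() );
    //   vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex, vk::su::getDeviceExtensions() );
    //   vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsQueueFamilyIndex );
    //   vk::Queue graphicsQueue = device.getQueue( graphicsQueueFamilyIndex, 0 );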
    void updateDescriptorSets( vk::Device const &                                                                    device,
                               vk::DescriptorSet const &                                                             descriptorSet,
                               std::vector<std::tuple<vk::DescriptorType, vk::Buffer, vk::DeviceSize, vk::BufferView>> const & bufferData,
                               vk::su::TextureData const &                                                           textureData,
                               uint32_t                                                                              bindingOffset = 0 );
    void updateDescriptorSets( vk::Device const &                                                                    device,
                               vk::DescriptorSet const &                                                             descriptorSet,
                               std::vector<std::tuple<vk::DescriptorType, vk::Buffer, vk::DeviceSize, vk::BufferView>> const & bufferData,
                               std::vector<vk::su::TextureData> const &                                              textureData,
                               uint32_t                                                                              bindingOffset = 0 );
  }  // namespace su
}  // namespace vk

std::ostream & operator<<( std::ostream & os, vk::su::UUID const & uuid );
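// Illustrative usage sketch (not part of this header's API): wiring a uniform buffer and a texture into a
// descriptor set with updateDescriptorSets. The device, descriptorSet, uniformBufferData and textureData
// objects are assumed to have been created earlier; the buffer view element of the tuple stays empty for a
// plain uniform buffer.
//
//   vk::su::updateDescriptorSets( device,
//                                 descriptorSet,
//                                 { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } },
//                                 textureData );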