From 2cb1c19c7febe75d27ab734d9e3acf341b836c66 Mon Sep 17 00:00:00 2001 From: asuessenbach Date: Wed, 17 Feb 2021 10:49:59 +0100 Subject: [PATCH] Introduce raii-compliant handle wrapper classes. --- CMakeLists.txt | 5 +- .../01_InitInstance/01_InitInstance.cpp | 62 + RAII_Samples/01_InitInstance/CMakeLists.txt | 39 + .../02_EnumerateDevices.cpp | 69 + .../02_EnumerateDevices/CMakeLists.txt | 35 + RAII_Samples/03_InitDevice/03_InitDevice.cpp | 68 + RAII_Samples/03_InitDevice/CMakeLists.txt | 35 + .../04_InitCommandBuffer.cpp | 71 + .../04_InitCommandBuffer/CMakeLists.txt | 35 + .../05_InitSwapchain/05_InitSwapchain.cpp | 192 + RAII_Samples/05_InitSwapchain/CMakeLists.txt | 35 + .../06_InitDepthBuffer/06_InitDepthBuffer.cpp | 123 + .../06_InitDepthBuffer/CMakeLists.txt | 35 + .../07_InitUniformBuffer.cpp | 106 + .../07_InitUniformBuffer/CMakeLists.txt | 35 + .../08_InitPipelineLayout.cpp | 74 + .../08_InitPipelineLayout/CMakeLists.txt | 35 + .../09_InitDescriptorSet.cpp | 100 + .../09_InitDescriptorSet/CMakeLists.txt | 35 + .../10_InitRenderPass/10_InitRenderPass.cpp | 109 + RAII_Samples/10_InitRenderPass/CMakeLists.txt | 35 + .../11_InitShaders/11_InitShaders.cpp | 84 + RAII_Samples/11_InitShaders/CMakeLists.txt | 35 + .../12_InitFrameBuffers.cpp | 94 + .../12_InitFrameBuffers/CMakeLists.txt | 35 + .../13_InitVertexBuffer.cpp | 151 + .../13_InitVertexBuffer/CMakeLists.txt | 35 + .../14_InitPipeline/14_InitPipeline.cpp | 200 + RAII_Samples/14_InitPipeline/CMakeLists.txt | 35 + RAII_Samples/15_DrawCube/15_DrawCube.cpp | 216 + RAII_Samples/15_DrawCube/CMakeLists.txt | 35 + RAII_Samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp | 122 + RAII_Samples/16_Vulkan_1_1/CMakeLists.txt | 35 + RAII_Samples/CMakeLists.txt | 56 + RAII_Samples/CopyBlitImage/CMakeLists.txt | 35 + RAII_Samples/CopyBlitImage/CopyBlitImage.cpp | 266 + .../CreateDebugUtilsMessenger/CMakeLists.txt | 35 + .../CreateDebugUtilsMessenger.cpp | 144 + .../DebugUtilsObjectName/CMakeLists.txt | 35 + 
.../DebugUtilsObjectName.cpp | 75 + RAII_Samples/DrawTexturedCube/CMakeLists.txt | 35 + .../DrawTexturedCube/DrawTexturedCube.cpp | 214 + RAII_Samples/DynamicUniform/CMakeLists.txt | 35 + .../DynamicUniform/DynamicUniform.cpp | 254 + .../CMakeLists.txt | 35 + .../EnableValidationWithCallback.cpp | 211 + .../EnumerateDevicesAdvanced/CMakeLists.txt | 35 + .../EnumerateDevicesAdvanced.cpp | 86 + RAII_Samples/Events/CMakeLists.txt | 35 + RAII_Samples/Events/Events.cpp | 162 + RAII_Samples/ImmutableSampler/CMakeLists.txt | 35 + .../ImmutableSampler/ImmutableSampler.cpp | 238 + RAII_Samples/InitTexture/CMakeLists.txt | 35 + RAII_Samples/InitTexture/InitTexture.cpp | 230 + RAII_Samples/InputAttachment/CMakeLists.txt | 35 + .../InputAttachment/InputAttachment.cpp | 319 + .../CMakeLists.txt | 35 + .../InstanceExtensionProperties.cpp | 68 + .../CMakeLists.txt | 35 + .../InstanceLayerExtensionProperties.cpp | 103 + .../InstanceLayerProperties/CMakeLists.txt | 35 + .../InstanceLayerProperties.cpp | 67 + RAII_Samples/InstanceVersion/CMakeLists.txt | 35 + .../InstanceVersion/InstanceVersion.cpp | 58 + RAII_Samples/MultipleSets/CMakeLists.txt | 35 + RAII_Samples/MultipleSets/MultipleSets.cpp | 304 + RAII_Samples/OcclusionQuery/CMakeLists.txt | 35 + .../OcclusionQuery/OcclusionQuery.cpp | 270 + .../PhysicalDeviceExtensions/CMakeLists.txt | 35 + .../PhysicalDeviceExtensions.cpp | 80 + .../PhysicalDeviceFeatures/CMakeLists.txt | 35 + .../PhysicalDeviceFeatures.cpp | 746 ++ .../PhysicalDeviceGroups/CMakeLists.txt | 35 + .../PhysicalDeviceGroups.cpp | 107 + .../CMakeLists.txt | 37 + .../PhysicalDeviceMemoryProperties.cpp | 119 + .../PhysicalDeviceProperties/CMakeLists.txt | 35 + .../PhysicalDeviceProperties.cpp | 1272 ++ .../CMakeLists.txt | 35 + .../PhysicalDeviceQueueFamilyProperties.cpp | 101 + RAII_Samples/PipelineCache/CMakeLists.txt | 35 + RAII_Samples/PipelineCache/PipelineCache.cpp | 416 + .../PipelineDerivative/CMakeLists.txt | 35 + 
.../PipelineDerivative/PipelineDerivative.cpp | 332 + RAII_Samples/PushConstants/CMakeLists.txt | 35 + RAII_Samples/PushConstants/PushConstants.cpp | 280 + RAII_Samples/PushDescriptors/CMakeLists.txt | 35 + .../PushDescriptors/PushDescriptors.cpp | 247 + RAII_Samples/RayTracing/CMakeLists.txt | 43 + RAII_Samples/RayTracing/CameraManipulator.cpp | 439 + RAII_Samples/RayTracing/CameraManipulator.hpp | 89 + RAII_Samples/RayTracing/RayTracing.cpp | 1390 ++ .../SecondaryCommandBuffer/CMakeLists.txt | 35 + .../SecondaryCommandBuffer.cpp | 282 + .../SeparateImageSampler/CMakeLists.txt | 35 + .../SeparateImageSampler.cpp | 302 + .../SurfaceCapabilities/CMakeLists.txt | 39 + .../SurfaceCapabilities.cpp | 181 + RAII_Samples/SurfaceFormats/CMakeLists.txt | 35 + .../SurfaceFormats/SurfaceFormats.cpp | 82 + RAII_Samples/Template/CMakeLists.txt | 35 + RAII_Samples/Template/Template.cpp | 211 + RAII_Samples/TexelBuffer/CMakeLists.txt | 35 + RAII_Samples/TexelBuffer/TexelBuffer.cpp | 246 + RAII_Samples/utils/CMakeLists.txt | 37 + RAII_Samples/utils/shaders.hpp | 41 + RAII_Samples/utils/utils.hpp | 1053 ++ README.md | 4 + VulkanHppGenerator.cpp | 4923 +++++++- VulkanHppGenerator.hpp | 286 +- samples/01_InitInstance/01_InitInstance.cpp | 16 +- .../02_EnumerateDevices.cpp | 16 +- samples/03_InitDevice/03_InitDevice.cpp | 30 +- .../04_InitCommandBuffer.cpp | 35 +- samples/05_InitSwapchain/05_InitSwapchain.cpp | 66 +- .../06_InitDepthBuffer/06_InitDepthBuffer.cpp | 43 +- .../07_InitUniformBuffer.cpp | 68 +- .../08_InitPipelineLayout.cpp | 28 +- .../09_InitDescriptorSet.cpp | 44 +- .../10_InitRenderPass/10_InitRenderPass.cpp | 27 +- samples/11_InitShaders/11_InitShaders.cpp | 24 +- .../12_InitFrameBuffers.cpp | 49 +- .../13_InitVertexBuffer.cpp | 87 +- samples/14_InitPipeline/14_InitPipeline.cpp | 52 +- samples/15_DrawCube/15_DrawCube.cpp | 144 +- samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp | 10 +- samples/CopyBlitImage/CopyBlitImage.cpp | 150 +- .../CreateDebugUtilsMessenger.cpp | 17 +- 
.../DebugUtilsObjectName.cpp | 20 +- samples/DrawTexturedCube/DrawTexturedCube.cpp | 147 +- samples/DynamicUniform/DynamicUniform.cpp | 171 +- .../EnableValidationWithCallback.cpp | 46 +- .../EnumerateDevicesAdvanced.cpp | 10 +- samples/Events/Events.cpp | 89 +- samples/ImmutableSampler/ImmutableSampler.cpp | 153 +- samples/InitTexture/InitTexture.cpp | 163 +- samples/InputAttachment/InputAttachment.cpp | 205 +- .../InstanceLayerExtensionProperties.cpp | 5 +- samples/InstanceVersion/InstanceVersion.cpp | 2 +- samples/MultipleSets/MultipleSets.cpp | 166 +- samples/OcclusionQuery/OcclusionQuery.cpp | 204 +- .../PhysicalDeviceExtensions.cpp | 12 +- .../PhysicalDeviceFeatures.cpp | 12 +- .../PhysicalDeviceGroups.cpp | 28 +- .../PhysicalDeviceProperties.cpp | 10 +- .../PhysicalDeviceQueueFamilyProperties.cpp | 10 +- samples/PipelineCache/PipelineCache.cpp | 150 +- .../PipelineDerivative/PipelineDerivative.cpp | 167 +- samples/PushConstants/PushConstants.cpp | 154 +- samples/PushDescriptors/PushDescriptors.cpp | 162 +- samples/RayTracing/RayTracing.cpp | 524 +- .../SecondaryCommandBuffer.cpp | 163 +- .../SeparateImageSampler.cpp | 194 +- .../SurfaceCapabilities.cpp | 26 +- samples/SurfaceFormats/SurfaceFormats.cpp | 13 +- samples/Template/Template.cpp | 155 +- samples/TexelBuffer/TexelBuffer.cpp | 158 +- samples/utils/math.cpp | 22 +- samples/utils/shaders.cpp | 8 +- samples/utils/shaders.hpp | 6 +- samples/utils/utils.cpp | 517 +- samples/utils/utils.hpp | 339 +- tests/Hash/Hash.cpp | 4 +- vulkan/vulkan.hpp | 3 +- vulkan/vulkan_raii.hpp | 10432 ++++++++++++++++ 165 files changed, 32669 insertions(+), 2892 deletions(-) create mode 100644 RAII_Samples/01_InitInstance/01_InitInstance.cpp create mode 100644 RAII_Samples/01_InitInstance/CMakeLists.txt create mode 100644 RAII_Samples/02_EnumerateDevices/02_EnumerateDevices.cpp create mode 100644 RAII_Samples/02_EnumerateDevices/CMakeLists.txt create mode 100644 RAII_Samples/03_InitDevice/03_InitDevice.cpp create mode 100644 
RAII_Samples/03_InitDevice/CMakeLists.txt create mode 100644 RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp create mode 100644 RAII_Samples/04_InitCommandBuffer/CMakeLists.txt create mode 100644 RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp create mode 100644 RAII_Samples/05_InitSwapchain/CMakeLists.txt create mode 100644 RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp create mode 100644 RAII_Samples/06_InitDepthBuffer/CMakeLists.txt create mode 100644 RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp create mode 100644 RAII_Samples/07_InitUniformBuffer/CMakeLists.txt create mode 100644 RAII_Samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp create mode 100644 RAII_Samples/08_InitPipelineLayout/CMakeLists.txt create mode 100644 RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp create mode 100644 RAII_Samples/09_InitDescriptorSet/CMakeLists.txt create mode 100644 RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp create mode 100644 RAII_Samples/10_InitRenderPass/CMakeLists.txt create mode 100644 RAII_Samples/11_InitShaders/11_InitShaders.cpp create mode 100644 RAII_Samples/11_InitShaders/CMakeLists.txt create mode 100644 RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp create mode 100644 RAII_Samples/12_InitFrameBuffers/CMakeLists.txt create mode 100644 RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp create mode 100644 RAII_Samples/13_InitVertexBuffer/CMakeLists.txt create mode 100644 RAII_Samples/14_InitPipeline/14_InitPipeline.cpp create mode 100644 RAII_Samples/14_InitPipeline/CMakeLists.txt create mode 100644 RAII_Samples/15_DrawCube/15_DrawCube.cpp create mode 100644 RAII_Samples/15_DrawCube/CMakeLists.txt create mode 100644 RAII_Samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp create mode 100644 RAII_Samples/16_Vulkan_1_1/CMakeLists.txt create mode 100644 RAII_Samples/CMakeLists.txt create mode 100644 RAII_Samples/CopyBlitImage/CMakeLists.txt create mode 100644 
RAII_Samples/CopyBlitImage/CopyBlitImage.cpp create mode 100644 RAII_Samples/CreateDebugUtilsMessenger/CMakeLists.txt create mode 100644 RAII_Samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp create mode 100644 RAII_Samples/DebugUtilsObjectName/CMakeLists.txt create mode 100644 RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp create mode 100644 RAII_Samples/DrawTexturedCube/CMakeLists.txt create mode 100644 RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp create mode 100644 RAII_Samples/DynamicUniform/CMakeLists.txt create mode 100644 RAII_Samples/DynamicUniform/DynamicUniform.cpp create mode 100644 RAII_Samples/EnableValidationWithCallback/CMakeLists.txt create mode 100644 RAII_Samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp create mode 100644 RAII_Samples/EnumerateDevicesAdvanced/CMakeLists.txt create mode 100644 RAII_Samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp create mode 100644 RAII_Samples/Events/CMakeLists.txt create mode 100644 RAII_Samples/Events/Events.cpp create mode 100644 RAII_Samples/ImmutableSampler/CMakeLists.txt create mode 100644 RAII_Samples/ImmutableSampler/ImmutableSampler.cpp create mode 100644 RAII_Samples/InitTexture/CMakeLists.txt create mode 100644 RAII_Samples/InitTexture/InitTexture.cpp create mode 100644 RAII_Samples/InputAttachment/CMakeLists.txt create mode 100644 RAII_Samples/InputAttachment/InputAttachment.cpp create mode 100644 RAII_Samples/InstanceExtensionProperties/CMakeLists.txt create mode 100644 RAII_Samples/InstanceExtensionProperties/InstanceExtensionProperties.cpp create mode 100644 RAII_Samples/InstanceLayerExtensionProperties/CMakeLists.txt create mode 100644 RAII_Samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp create mode 100644 RAII_Samples/InstanceLayerProperties/CMakeLists.txt create mode 100644 RAII_Samples/InstanceLayerProperties/InstanceLayerProperties.cpp create mode 100644 RAII_Samples/InstanceVersion/CMakeLists.txt 
create mode 100644 RAII_Samples/InstanceVersion/InstanceVersion.cpp create mode 100644 RAII_Samples/MultipleSets/CMakeLists.txt create mode 100644 RAII_Samples/MultipleSets/MultipleSets.cpp create mode 100644 RAII_Samples/OcclusionQuery/CMakeLists.txt create mode 100644 RAII_Samples/OcclusionQuery/OcclusionQuery.cpp create mode 100644 RAII_Samples/PhysicalDeviceExtensions/CMakeLists.txt create mode 100644 RAII_Samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp create mode 100644 RAII_Samples/PhysicalDeviceFeatures/CMakeLists.txt create mode 100644 RAII_Samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp create mode 100644 RAII_Samples/PhysicalDeviceGroups/CMakeLists.txt create mode 100644 RAII_Samples/PhysicalDeviceGroups/PhysicalDeviceGroups.cpp create mode 100644 RAII_Samples/PhysicalDeviceMemoryProperties/CMakeLists.txt create mode 100644 RAII_Samples/PhysicalDeviceMemoryProperties/PhysicalDeviceMemoryProperties.cpp create mode 100644 RAII_Samples/PhysicalDeviceProperties/CMakeLists.txt create mode 100644 RAII_Samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp create mode 100644 RAII_Samples/PhysicalDeviceQueueFamilyProperties/CMakeLists.txt create mode 100644 RAII_Samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp create mode 100644 RAII_Samples/PipelineCache/CMakeLists.txt create mode 100644 RAII_Samples/PipelineCache/PipelineCache.cpp create mode 100644 RAII_Samples/PipelineDerivative/CMakeLists.txt create mode 100644 RAII_Samples/PipelineDerivative/PipelineDerivative.cpp create mode 100644 RAII_Samples/PushConstants/CMakeLists.txt create mode 100644 RAII_Samples/PushConstants/PushConstants.cpp create mode 100644 RAII_Samples/PushDescriptors/CMakeLists.txt create mode 100644 RAII_Samples/PushDescriptors/PushDescriptors.cpp create mode 100644 RAII_Samples/RayTracing/CMakeLists.txt create mode 100644 RAII_Samples/RayTracing/CameraManipulator.cpp create mode 100644 
RAII_Samples/RayTracing/CameraManipulator.hpp create mode 100644 RAII_Samples/RayTracing/RayTracing.cpp create mode 100644 RAII_Samples/SecondaryCommandBuffer/CMakeLists.txt create mode 100644 RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp create mode 100644 RAII_Samples/SeparateImageSampler/CMakeLists.txt create mode 100644 RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp create mode 100644 RAII_Samples/SurfaceCapabilities/CMakeLists.txt create mode 100644 RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp create mode 100644 RAII_Samples/SurfaceFormats/CMakeLists.txt create mode 100644 RAII_Samples/SurfaceFormats/SurfaceFormats.cpp create mode 100644 RAII_Samples/Template/CMakeLists.txt create mode 100644 RAII_Samples/Template/Template.cpp create mode 100644 RAII_Samples/TexelBuffer/CMakeLists.txt create mode 100644 RAII_Samples/TexelBuffer/TexelBuffer.cpp create mode 100644 RAII_Samples/utils/CMakeLists.txt create mode 100644 RAII_Samples/utils/shaders.hpp create mode 100644 RAII_Samples/utils/utils.hpp create mode 100644 vulkan/vulkan_raii.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index be13d57..b55b0ce 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,7 +74,9 @@ if (NOT DEFINED VulkanHeaders_INCLUDE_DIR) endif() file(TO_NATIVE_PATH ${VulkanHeaders_INCLUDE_DIR}/vulkan/vulkan.hpp vulkan_hpp) string(REPLACE "\\" "\\\\" vulkan_hpp ${vulkan_hpp}) -add_definitions(-DVULKAN_HPP_FILE="${vulkan_hpp}") +file(TO_NATIVE_PATH ${VulkanHeaders_INCLUDE_DIR}/vulkan/vulkan_raii.hpp vulkan_raii_hpp) +string(REPLACE "\\" "\\\\" vulkan_raii_hpp ${vulkan_raii_hpp}) +add_definitions(-DVULKAN_HPP_FILE="${vulkan_hpp}" -DVULKAN_RAII_HPP_FILE="${vulkan_raii_hpp}") include_directories(${VulkanHeaders_INCLUDE_DIR}) set(HEADERS @@ -146,6 +148,7 @@ if (SAMPLES_BUILD) add_subdirectory(glslang) # samples add_subdirectory(samples) + add_subdirectory(RAII_Samples) endif () option (TESTS_BUILD "Build tests" OFF) diff --git 
a/RAII_Samples/01_InitInstance/01_InitInstance.cpp b/RAII_Samples/01_InitInstance/01_InitInstance.cpp new file mode 100644 index 0000000..65de2d9 --- /dev/null +++ b/RAII_Samples/01_InitInstance/01_InitInstance.cpp @@ -0,0 +1,62 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 01_InitInstanceRAII +// Create and destroy a vk::UniqueInstance + +#include "vulkan/vulkan_raii.hpp" +#include "../utils/utils.hpp" +#include + +static std::string AppName = "01_InitInstanceRAII"; +static std::string EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + /* VULKAN_HPP_KEY_START */ + + try + { + // the very beginning: instantiate a context + std::unique_ptr context = vk::raii::su::make_unique(); + + // initialize the vk::ApplicationInfo structure + vk::ApplicationInfo applicationInfo( AppName.c_str(), 1, EngineName.c_str(), 1, VK_API_VERSION_1_1 ); + + // initialize the vk::InstanceCreateInfo + vk::InstanceCreateInfo instanceCreateInfo( {}, &applicationInfo ); + + // create an Instance + std::unique_ptr instance = vk::raii::su::make_unique( *context, instanceCreateInfo ); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + + /* VULKAN_HPP_KEY_END */ + + return 0; +} diff --git a/RAII_Samples/01_InitInstance/CMakeLists.txt b/RAII_Samples/01_InitInstance/CMakeLists.txt new file mode 100644 index 0000000..c581beb --- /dev/null +++ b/RAII_Samples/01_InitInstance/CMakeLists.txt @@ -0,0 +1,39 @@ +# Copyright(c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +if(NOT SAMPLES_BUILD_ONLY_DYNAMIC) + + project(RAII_01_InitInstance) + + set(HEADERS + ) + + set(SOURCES + 01_InitInstance.cpp + ) + + source_group(headers FILES ${HEADERS}) + source_group(sources FILES ${SOURCES}) + + add_executable(RAII_01_InitInstance + ${HEADERS} + ${SOURCES} + ) + + set_target_properties(RAII_01_InitInstance PROPERTIES FOLDER "RAII_Samples") + target_link_libraries(RAII_01_InitInstance PRIVATE utils) + +endif() diff --git a/RAII_Samples/02_EnumerateDevices/02_EnumerateDevices.cpp b/RAII_Samples/02_EnumerateDevices/02_EnumerateDevices.cpp new file mode 100644 index 0000000..89a0118 --- /dev/null +++ b/RAII_Samples/02_EnumerateDevices/02_EnumerateDevices.cpp @@ -0,0 +1,69 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 02_EnumerateDevicesRAII +// Enumerate physical devices + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wunused-variable" +#elif defined( __GNUC__ ) +# pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../utils/utils.hpp" + +#include + +static std::string AppName = "02_EnumerateDevicesRAII"; +static std::string EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = vk::raii::su::makeUniqueInstance( *context, AppName, EngineName ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + /* VULKAN_HPP_KEY_START */ + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/02_EnumerateDevices/CMakeLists.txt b/RAII_Samples/02_EnumerateDevices/CMakeLists.txt new file mode 100644 index 0000000..e169ccb --- /dev/null +++ b/RAII_Samples/02_EnumerateDevices/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_02_EnumerateDevices) + +set(HEADERS +) + +set(SOURCES + 02_EnumerateDevices.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_02_EnumerateDevices + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_02_EnumerateDevices PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_02_EnumerateDevices PRIVATE utils) diff --git a/RAII_Samples/03_InitDevice/03_InitDevice.cpp b/RAII_Samples/03_InitDevice/03_InitDevice.cpp new file mode 100644 index 0000000..e9e8706 --- /dev/null +++ b/RAII_Samples/03_InitDevice/03_InitDevice.cpp @@ -0,0 +1,68 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 03_InitDeviceRAII +// Create and destroy a device + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "03_InitDeviceRAII"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = vk::raii::su::makeUniqueInstance( *context, AppName, EngineName ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + /* VULKAN_HPP_KEY_START */ + + // find the index of the first queue family that supports graphics + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + + // create a Device + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( {}, graphicsQueueFamilyIndex, 1, &queuePriority ); + vk::DeviceCreateInfo deviceCreateInfo( {}, deviceQueueCreateInfo ); + + std::unique_ptr device = vk::raii::su::make_unique( *physicalDevice, deviceCreateInfo ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/03_InitDevice/CMakeLists.txt b/RAII_Samples/03_InitDevice/CMakeLists.txt new file mode 100644 index 0000000..e846c8f --- /dev/null +++ b/RAII_Samples/03_InitDevice/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_03_InitDevice) + +set(HEADERS +) + +set(SOURCES + 03_InitDevice.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_03_InitDevice + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_03_InitDevice PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_03_InitDevice PRIVATE utils) diff --git a/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp b/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp new file mode 100644 index 0000000..fdda6f5 --- /dev/null +++ b/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp @@ -0,0 +1,71 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 04_InitCommandBufferRAII +// Create command buffer + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "04_InitCommandBufferRAII"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = vk::raii::su::makeUniqueInstance( *context, AppName, EngineName ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex ); + + /* VULKAN_HPP_KEY_START */ + + // create a CommandPool to allocate a CommandBuffer from + vk::CommandPoolCreateInfo commandPoolCreateInfo( {}, graphicsQueueFamilyIndex ); + std::unique_ptr commandPool = + vk::raii::su::make_unique( *device, commandPoolCreateInfo ); + + // allocate a CommandBuffer from the CommandPool + vk::CommandBufferAllocateInfo commandBufferAllocateInfo( **commandPool, vk::CommandBufferLevel::ePrimary, 1 ); + std::unique_ptr commandBuffer = vk::raii::su::make_unique( + std::move( vk::raii::CommandBuffers( *device, commandBufferAllocateInfo ).front() ) ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << 
"vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/04_InitCommandBuffer/CMakeLists.txt b/RAII_Samples/04_InitCommandBuffer/CMakeLists.txt new file mode 100644 index 0000000..c10cf4e --- /dev/null +++ b/RAII_Samples/04_InitCommandBuffer/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_04_InitCommandBuffer) + +set(HEADERS +) + +set(SOURCES + 04_InitCommandBuffer.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_04_InitCommandBuffer + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_04_InitCommandBuffer PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_04_InitCommandBuffer PRIVATE utils) diff --git a/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp b/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp new file mode 100644 index 0000000..3f450bf --- /dev/null +++ b/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp @@ -0,0 +1,192 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 05_InitSwapchainRAII +// Initialize a swapchain + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "05_InitSwapchainRAII"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + std::vector queueFamilyProperties = physicalDevice->getQueueFamilyProperties(); + uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( queueFamilyProperties ); + + /* VULKAN_HPP_KEY_START */ + + uint32_t width = 64; + uint32_t height = 64; + vk::su::WindowData window = vk::su::createWindow( AppName, { width, height } ); + VkSurfaceKHR _surface; + glfwCreateWindowSurface( static_cast( **instance ), window.handle, nullptr, &_surface ); + std::unique_ptr surface = vk::raii::su::make_unique( *instance, _surface ); + + // determine a queueFamilyIndex that suports present + // first check if the graphicsQueueFamiliyIndex is good enough + uint32_t presentQueueFamilyIndex = 
physicalDevice->getSurfaceSupportKHR( graphicsQueueFamilyIndex, **surface ) + ? graphicsQueueFamilyIndex + : vk::su::checked_cast( queueFamilyProperties.size() ); + if ( presentQueueFamilyIndex == queueFamilyProperties.size() ) + { + // the graphicsQueueFamilyIndex doesn't support present -> look for an other family index that supports both + // graphics and present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && + physicalDevice->getSurfaceSupportKHR( vk::su::checked_cast( i ), **surface ) ) + { + graphicsQueueFamilyIndex = vk::su::checked_cast( i ); + presentQueueFamilyIndex = graphicsQueueFamilyIndex; + break; + } + } + if ( presentQueueFamilyIndex == queueFamilyProperties.size() ) + { + // there's nothing like a single family index that supports both graphics and present -> look for an other + // family index that supports present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( physicalDevice->getSurfaceSupportKHR( vk::su::checked_cast( i ), **surface ) ) + { + presentQueueFamilyIndex = vk::su::checked_cast( i ); + break; + } + } + } + } + if ( ( graphicsQueueFamilyIndex == queueFamilyProperties.size() ) || + ( presentQueueFamilyIndex == queueFamilyProperties.size() ) ) + { + throw std::runtime_error( "Could not find a queue for graphics or present -> terminating" ); + } + + // create a device + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex, vk::su::getDeviceExtensions() ); + + // get the supported VkFormats + std::vector formats = physicalDevice->getSurfaceFormatsKHR( **surface ); + assert( !formats.empty() ); + vk::Format format = + ( formats[0].format == vk::Format::eUndefined ) ? 
vk::Format::eB8G8R8A8Unorm : formats[0].format; + + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice->getSurfaceCapabilitiesKHR( **surface ); + VkExtent2D swapchainExtent; + if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) + { + // If the surface size is undefined, the size is set to the size of the images requested. + swapchainExtent.width = + vk::su::clamp( width, surfaceCapabilities.minImageExtent.width, surfaceCapabilities.maxImageExtent.width ); + swapchainExtent.height = + vk::su::clamp( height, surfaceCapabilities.minImageExtent.height, surfaceCapabilities.maxImageExtent.height ); + } + else + { + // If the surface size is defined, the swap chain size must match + swapchainExtent = surfaceCapabilities.currentExtent; + } + + // The FIFO present mode is guaranteed by the spec to be supported + vk::PresentModeKHR swapchainPresentMode = vk::PresentModeKHR::eFifo; + + vk::SurfaceTransformFlagBitsKHR preTransform = + ( surfaceCapabilities.supportedTransforms & vk::SurfaceTransformFlagBitsKHR::eIdentity ) + ? vk::SurfaceTransformFlagBitsKHR::eIdentity + : surfaceCapabilities.currentTransform; + + vk::CompositeAlphaFlagBitsKHR compositeAlpha = + ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePreMultiplied ) + ? vk::CompositeAlphaFlagBitsKHR::ePreMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) + ? vk::CompositeAlphaFlagBitsKHR::ePostMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) + ? 
vk::CompositeAlphaFlagBitsKHR::eInherit + : vk::CompositeAlphaFlagBitsKHR::eOpaque; + + vk::SwapchainCreateInfoKHR swapChainCreateInfo( vk::SwapchainCreateFlagsKHR(), + **surface, + surfaceCapabilities.minImageCount, + format, + vk::ColorSpaceKHR::eSrgbNonlinear, + swapchainExtent, + 1, + vk::ImageUsageFlagBits::eColorAttachment, + vk::SharingMode::eExclusive, + {}, + preTransform, + compositeAlpha, + swapchainPresentMode, + true, + nullptr ); + + std::array queueFamilyIndices = { graphicsQueueFamilyIndex, presentQueueFamilyIndex }; + if ( graphicsQueueFamilyIndex != presentQueueFamilyIndex ) + { + // If the graphics and present queues are from different queue families, we either have to explicitly transfer + // ownership of images between the queues, or we have to create the swapchain with imageSharingMode as + // VK_SHARING_MODE_CONCURRENT + swapChainCreateInfo.imageSharingMode = vk::SharingMode::eConcurrent; + swapChainCreateInfo.queueFamilyIndexCount = vk::su::checked_cast( queueFamilyIndices.size() ); + swapChainCreateInfo.pQueueFamilyIndices = queueFamilyIndices.data(); + } + + std::unique_ptr swapChain = + vk::raii::su::make_unique( *device, swapChainCreateInfo ); + std::vector swapChainImages = swapChain->getImages(); + + std::vector> imageViews; + imageViews.reserve( swapChainImages.size() ); + vk::ComponentMapping componentMapping( + vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); + vk::ImageSubresourceRange subResourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); + for ( auto image : swapChainImages ) + { + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, static_cast( image ), vk::ImageViewType::e2D, format, componentMapping, subResourceRange ); + imageViews.push_back( vk::raii::su::make_unique( *device, imageViewCreateInfo ) ); + } + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + 
catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/05_InitSwapchain/CMakeLists.txt b/RAII_Samples/05_InitSwapchain/CMakeLists.txt new file mode 100644 index 0000000..c6df6a4 --- /dev/null +++ b/RAII_Samples/05_InitSwapchain/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_05_InitSwapchain) + +set(HEADERS +) + +set(SOURCES + 05_InitSwapchain.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_05_InitSwapchain + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_05_InitSwapchain PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_05_InitSwapchain PRIVATE utils) diff --git a/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp b/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp new file mode 100644 index 0000000..9bf6d2e --- /dev/null +++ b/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp @@ -0,0 +1,123 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 06_InitDepthBufferRAII +// Initialize a depth buffer + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "06_InitDepthBuffer"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + /* VULKAN_HPP_KEY_START */ + + const vk::Format depthFormat = vk::Format::eD16Unorm; + vk::FormatProperties formatProperties = physicalDevice->getFormatProperties( depthFormat ); + + vk::ImageTiling tiling; + if ( formatProperties.linearTilingFeatures & vk::FormatFeatureFlagBits::eDepthStencilAttachment ) + { + tiling = vk::ImageTiling::eLinear; + } + else if ( formatProperties.optimalTilingFeatures & vk::FormatFeatureFlagBits::eDepthStencilAttachment ) + { + tiling = 
vk::ImageTiling::eOptimal; + } + else + { + throw std::runtime_error( "DepthStencilAttachment is not supported for D16Unorm depth format." ); + } + vk::ImageCreateInfo imageCreateInfo( {}, + vk::ImageType::e2D, + depthFormat, + vk::Extent3D( surfaceData.extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + tiling, + vk::ImageUsageFlagBits::eDepthStencilAttachment ); + std::unique_ptr depthImage = vk::raii::su::make_unique( *device, imageCreateInfo ); + + vk::PhysicalDeviceMemoryProperties memoryProperties = physicalDevice->getMemoryProperties(); + vk::MemoryRequirements memoryRequirements = depthImage->getMemoryRequirements(); + + uint32_t typeBits = memoryRequirements.memoryTypeBits; + uint32_t typeIndex = uint32_t( ~0 ); + for ( uint32_t i = 0; i < memoryProperties.memoryTypeCount; i++ ) + { + if ( ( typeBits & 1 ) && + ( ( memoryProperties.memoryTypes[i].propertyFlags & vk::MemoryPropertyFlagBits::eDeviceLocal ) == + vk::MemoryPropertyFlagBits::eDeviceLocal ) ) + { + typeIndex = i; + break; + } + typeBits >>= 1; + } + assert( typeIndex != uint32_t( ~0 ) ); + + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, typeIndex ); + std::unique_ptr depthMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + depthImage->bindMemory( **depthMemory, 0 ); + + vk::ComponentMapping componentMapping( + vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); + vk::ImageSubresourceRange subResourceRange( vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1 ); + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, **depthImage, vk::ImageViewType::e2D, depthFormat, componentMapping, subResourceRange ); + std::unique_ptr depthView = + vk::raii::su::make_unique( *device, imageViewCreateInfo ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: 
" << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/06_InitDepthBuffer/CMakeLists.txt b/RAII_Samples/06_InitDepthBuffer/CMakeLists.txt new file mode 100644 index 0000000..4f10bde --- /dev/null +++ b/RAII_Samples/06_InitDepthBuffer/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_06_InitDepthBuffer) + +set(HEADERS +) + +set(SOURCES + 06_InitDepthBuffer.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_06_InitDepthBuffer + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_06_InitDepthBuffer PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_06_InitDepthBuffer PRIVATE utils) diff --git a/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp b/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp new file mode 100644 index 0000000..592e6ec --- /dev/null +++ b/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp @@ -0,0 +1,106 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 07_InitUniformBufferRAII +// Initialize a uniform buffer + +#if defined( _MSC_VER ) +# pragma warning( disable : 4127 ) // disable warning 4127: conditional expression is constant +# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed + // to get glm/detail/type_vec?.hpp without warnings +#elif defined( __GNUC__ ) +// don't know how to switch off that warning here +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../utils/utils.hpp" + +#include + +#define GLM_FORCE_RADIANS +#include + +static char const * AppName = "07_InitUniformBuffer"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex ); + + /* VULKAN_HPP_KEY_START */ + + glm::mat4x4 model = glm::mat4x4( 1.0f ); + glm::mat4x4 view = + glm::lookAt( glm::vec3( -5.0f, 3.0f, 
-10.0f ), glm::vec3( 0.0f, 0.0f, 0.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ) ); + glm::mat4x4 projection = glm::perspective( glm::radians( 45.0f ), 1.0f, 0.1f, 100.0f ); + // clang-format off + glm::mat4x4 clip = glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, -1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.5f, 0.0f, + 0.0f, 0.0f, 0.5f, 1.0f ); // vulkan clip space has inverted y and half z ! + // clang-format on + glm::mat4x4 mvpc = clip * projection * view * model; + + vk::BufferCreateInfo bufferCreateInfo( {}, sizeof( mvpc ), vk::BufferUsageFlagBits::eUniformBuffer ); + std::unique_ptr uniformDataBuffer = + vk::raii::su::make_unique( *device, bufferCreateInfo ); + vk::MemoryRequirements memoryRequirements = uniformDataBuffer->getMemoryRequirements(); + + uint32_t typeIndex = + vk::su::findMemoryType( physicalDevice->getMemoryProperties(), + memoryRequirements.memoryTypeBits, + vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, typeIndex ); + std::unique_ptr uniformDataMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + + uint8_t * pData = static_cast( uniformDataMemory->mapMemory( 0, memoryRequirements.size ) ); + memcpy( pData, &mvpc, sizeof( mvpc ) ); + uniformDataMemory->unmapMemory(); + + uniformDataBuffer->bindMemory( **uniformDataMemory, 0 ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/07_InitUniformBuffer/CMakeLists.txt b/RAII_Samples/07_InitUniformBuffer/CMakeLists.txt new file mode 100644 index 0000000..1efa49c --- /dev/null +++ b/RAII_Samples/07_InitUniformBuffer/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_07_InitUniformBuffer) + +set(HEADERS +) + +set(SOURCES + 07_InitUniformBuffer.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_07_InitUniformBuffer + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_07_InitUniformBuffer PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_07_InitUniformBuffer PRIVATE utils) diff --git a/RAII_Samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp b/RAII_Samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp new file mode 100644 index 0000000..9ca8ebf --- /dev/null +++ b/RAII_Samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp @@ -0,0 +1,74 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 08_InitPipelineLayoutRAII +// Initialize a descriptor and pipeline layout + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "08_InitPipelineLayout"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex ); + + /* VULKAN_HPP_KEY_START */ + + // create a DescriptorSetLayout + vk::DescriptorSetLayoutBinding descriptorSetLayoutBinding( + 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ); + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( {}, descriptorSetLayoutBinding ); + std::unique_ptr descriptorSetLayout = + vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + + // create a PipelineLayout using that DescriptorSetLayout + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, **descriptorSetLayout ); + std::unique_ptr pipelineLayout = + 
vk::raii::su::make_unique( *device, pipelineLayoutCreateInfo ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/08_InitPipelineLayout/CMakeLists.txt b/RAII_Samples/08_InitPipelineLayout/CMakeLists.txt new file mode 100644 index 0000000..b67ecb8 --- /dev/null +++ b/RAII_Samples/08_InitPipelineLayout/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_08_InitPipelineLayout) + +set(HEADERS +) + +set(SOURCES + 08_InitPipelineLayout.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_08_InitPipelineLayout + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_08_InitPipelineLayout PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_08_InitPipelineLayout PRIVATE utils) diff --git a/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp b/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp new file mode 100644 index 0000000..3aba957 --- /dev/null +++ b/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp @@ -0,0 +1,100 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 09_InitDescriptorSet +// Initialize a descriptor set + +#if defined( _MSC_VER ) +# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed + // to get glm/detail/type_vec?.hpp without warnings +#elif defined( __GNUC__ ) +// don't know how to switch off that warning here +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/math.hpp" +#include "../utils/utils.hpp" + +#include + +#define GLM_FORCE_RADIANS +#include + +static char const * AppName = "09_InitDescriptorSet"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( vk::Extent2D( 0, 0 ) ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + + /* VULKAN_HPP_KEY_START */ + + // create a descriptor pool + vk::DescriptorPoolSize poolSize( vk::DescriptorType::eUniformBuffer, 1 ); + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSize ); + std::unique_ptr descriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + + // allocate a descriptor set + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, **descriptorSetLayout ); + 
std::unique_ptr descriptorSet = vk::raii::su::make_unique( + std::move( vk::raii::DescriptorSets( *device, descriptorSetAllocateInfo ).front() ) ); + + vk::DescriptorBufferInfo descriptorBufferInfo( **uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::WriteDescriptorSet writeDescriptorSet( + **descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, descriptorBufferInfo ); + device->updateDescriptorSets( writeDescriptorSet, nullptr ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/09_InitDescriptorSet/CMakeLists.txt b/RAII_Samples/09_InitDescriptorSet/CMakeLists.txt new file mode 100644 index 0000000..788dec6 --- /dev/null +++ b/RAII_Samples/09_InitDescriptorSet/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_09_InitDescriptorSet) + +set(HEADERS +) + +set(SOURCES + 09_InitDescriptorSet.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_09_InitDescriptorSet + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_09_InitDescriptorSet PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_09_InitDescriptorSet PRIVATE utils) diff --git a/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp b/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp new file mode 100644 index 0000000..7456e5c --- /dev/null +++ b/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp @@ -0,0 +1,109 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 10_InitRenderPass +// Initialize a render pass + +#if defined( _MSC_VER ) +# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed + // to get glm/detail/type_vec?.hpp without warnings +#elif defined( __GNUC__ ) +// don't know how to switch off that warning here +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../utils/utils.hpp" + +#include + +#define GLM_FORCE_RADIANS +#include + +static char const * AppName = "10_InitRenderPass"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + vk::Format depthFormat = vk::Format::eD16Unorm; + + /* VULKAN_HPP_KEY_START */ + + std::array attachmentDescriptions; + attachmentDescriptions[0] = vk::AttachmentDescription( {}, + colorFormat, + vk::SampleCountFlagBits::e1, + vk::AttachmentLoadOp::eClear, + vk::AttachmentStoreOp::eStore, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + vk::ImageLayout::ePresentSrcKHR ); + attachmentDescriptions[1] = vk::AttachmentDescription( {}, + depthFormat, + vk::SampleCountFlagBits::e1, + vk::AttachmentLoadOp::eClear, + vk::AttachmentStoreOp::eDontCare, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eDepthStencilAttachmentOptimal ); + + 
vk::AttachmentReference colorReference( 0, vk::ImageLayout::eColorAttachmentOptimal ); + vk::AttachmentReference depthReference( 1, vk::ImageLayout::eDepthStencilAttachmentOptimal ); + vk::SubpassDescription subpass( {}, vk::PipelineBindPoint::eGraphics, {}, colorReference, {}, &depthReference ); + + vk::RenderPassCreateInfo renderPassCreateInfo( {}, attachmentDescriptions, subpass ); + std::unique_ptr renderPass = + vk::raii::su::make_unique( *device, renderPassCreateInfo ); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/10_InitRenderPass/CMakeLists.txt b/RAII_Samples/10_InitRenderPass/CMakeLists.txt new file mode 100644 index 0000000..99a8593 --- /dev/null +++ b/RAII_Samples/10_InitRenderPass/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_10_InitRenderPass) + +set(HEADERS +) + +set(SOURCES + 10_InitRenderPass.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_10_InitRenderPass + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_10_InitRenderPass PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_10_InitRenderPass PRIVATE utils) diff --git a/RAII_Samples/11_InitShaders/11_InitShaders.cpp b/RAII_Samples/11_InitShaders/11_InitShaders.cpp new file mode 100644 index 0000000..2e68045 --- /dev/null +++ b/RAII_Samples/11_InitShaders/11_InitShaders.cpp @@ -0,0 +1,84 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : 11_InitShaders +// Initialize vertex and fragment shaders + +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" + +#include + +static char const * AppName = "11_InitShaders"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex ); + + /* VULKAN_HPP_KEY_START */ + + glslang::InitializeProcess(); + + std::vector vertexShaderSPV; + bool ok = vk::su::GLSLtoSPV( vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C, vertexShaderSPV ); + assert( ok ); + + vk::ShaderModuleCreateInfo vertexShaderModuleCreateInfo( {}, vertexShaderSPV ); + std::unique_ptr vertexShaderModule = + vk::raii::su::make_unique( *device, vertexShaderModuleCreateInfo ); + + std::vector fragmentShaderSPV; + ok = vk::su::GLSLtoSPV( vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C, fragmentShaderSPV ); + assert( ok ); + + vk::ShaderModuleCreateInfo fragmentShaderModuleCreateInfo( {}, fragmentShaderSPV ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::make_unique( *device, fragmentShaderModuleCreateInfo ); + + glslang::FinalizeProcess(); + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + 
std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/11_InitShaders/CMakeLists.txt b/RAII_Samples/11_InitShaders/CMakeLists.txt new file mode 100644 index 0000000..e222f76 --- /dev/null +++ b/RAII_Samples/11_InitShaders/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_11_InitShaders) + +set(HEADERS +) + +set(SOURCES + 11_InitShaders.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_11_InitShaders + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_11_InitShaders PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_11_InitShaders PRIVATE utils) diff --git a/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp b/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp new file mode 100644 index 0000000..fa5eb3a --- /dev/null +++ b/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp @@ -0,0 +1,94 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 12_InitFrameBuffers +// Initialize framebuffers + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "12_InitFrameBuffers"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 64, 64 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + std::unique_ptr renderPass = + 
vk::raii::su::makeUniqueRenderPass( *device, swapChainData.colorFormat, depthBufferData.format ); + + /* VULKAN_KEY_START */ + + std::array attachments; + attachments[1] = **depthBufferData.imageView; + + std::vector> framebuffers; + framebuffers.reserve( swapChainData.imageViews.size() ); + for ( auto const & view : swapChainData.imageViews ) + { + attachments[0] = *view; + vk::FramebufferCreateInfo framebufferCreateInfo( + {}, **renderPass, attachments, surfaceData.extent.width, surfaceData.extent.height, 1 ); + framebuffers.push_back( vk::raii::su::make_unique( *device, framebufferCreateInfo ) ); + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/12_InitFrameBuffers/CMakeLists.txt b/RAII_Samples/12_InitFrameBuffers/CMakeLists.txt new file mode 100644 index 0000000..713a9df --- /dev/null +++ b/RAII_Samples/12_InitFrameBuffers/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_12_InitFrameBuffers) + +set(HEADERS +) + +set(SOURCES + 12_InitFrameBuffers.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_12_InitFrameBuffers + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_12_InitFrameBuffers PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_12_InitFrameBuffers PRIVATE utils) diff --git a/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp b/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp new file mode 100644 index 0000000..d601d7e --- /dev/null +++ b/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp @@ -0,0 +1,151 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 13_InitVertexBuffer +// Initialize vertex buffer + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __GNUC__ ) +# if ( 9 <= __GNUC__ ) +# pragma GCC diagnostic ignored "-Winit-list-lifetime" +# endif +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "13_InitVertexBuffer"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 64, 64 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, swapChainData.colorFormat, 
depthBufferData.format ); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + /* VULKAN_KEY_START */ + + // create a vertex buffer for some vertex and color data + vk::BufferCreateInfo bufferCreateInfo( {}, sizeof( coloredCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + std::unique_ptr vertexBuffer = vk::raii::su::make_unique( *device, bufferCreateInfo ); + + // allocate device memory for that buffer + vk::MemoryRequirements memoryRequirements = vertexBuffer->getMemoryRequirements(); + uint32_t memoryTypeIndex = + vk::su::findMemoryType( physicalDevice->getMemoryProperties(), + memoryRequirements.memoryTypeBits, + vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); + std::unique_ptr deviceMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + + // copy the vertex and color data into that device memory + uint8_t * pData = static_cast( deviceMemory->mapMemory( 0, memoryRequirements.size ) ); + memcpy( pData, coloredCubeData, sizeof( coloredCubeData ) ); + deviceMemory->unmapMemory(); + + // and bind the device memory to the vertex buffer + vertexBuffer->bindMemory( **deviceMemory, 0 ); + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + commandBuffer->begin( {} ); + + 
vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBuffer }, { 0 } ); + + commandBuffer->endRenderPass(); + commandBuffer->end(); + vk::raii::su::submitAndWait( *device, *graphicsQueue, *commandBuffer ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/13_InitVertexBuffer/CMakeLists.txt b/RAII_Samples/13_InitVertexBuffer/CMakeLists.txt new file mode 100644 index 0000000..d638b94 --- /dev/null +++ b/RAII_Samples/13_InitVertexBuffer/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_13_InitVertexBuffer) + +set(HEADERS +) + +set(SOURCES + 13_InitVertexBuffer.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_13_InitVertexBuffer + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_13_InitVertexBuffer PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_13_InitVertexBuffer PRIVATE utils) diff --git a/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp b/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp new file mode 100644 index 0000000..6bcaaaf --- /dev/null +++ b/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp @@ -0,0 +1,200 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 14_InitPipeline +// Initialize graphics pipeline + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +// no need to ignore any warnings with GCC +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" + +#include + +static char const * AppName = "14_InitPipeline"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, vk::Format::eD16Unorm ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); + std::unique_ptr fragmentShaderModule = + 
vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); + glslang::FinalizeProcess(); + + /* VULKAN_KEY_START */ + + std::array pipelineShaderStageCreateInfos = { + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, **vertexShaderModule, "main" ), + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, **fragmentShaderModule, "main" ) + }; + + vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( coloredCubeData[0] ) ); + std::array vertexInputAttributeDescriptions = { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32A32Sfloat, 0 ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32A32Sfloat, 16 ) + }; + vk::PipelineVertexInputStateCreateInfo pipelineVertexInputStateCreateInfo( + {}, // flags + vertexInputBindingDescription, // vertexBindingDescriptions + vertexInputAttributeDescriptions // vertexAttributeDescriptions + ); + + vk::PipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateCreateInfo( + {}, vk::PrimitiveTopology::eTriangleList ); + + vk::PipelineViewportStateCreateInfo pipelineViewportStateCreateInfo( {}, 1, nullptr, 1, nullptr ); + + vk::PipelineRasterizationStateCreateInfo pipelineRasterizationStateCreateInfo( + {}, // flags + false, // depthClampEnable + false, // rasterizerDiscardEnable + vk::PolygonMode::eFill, // polygonMode + vk::CullModeFlagBits::eBack, // cullMode + vk::FrontFace::eClockwise, // frontFace + false, // depthBiasEnable + 0.0f, // depthBiasConstantFactor + 0.0f, // depthBiasClamp + 0.0f, // depthBiasSlopeFactor + 1.0f // lineWidth + ); + + vk::PipelineMultisampleStateCreateInfo pipelineMultisampleStateCreateInfo( + {}, // flags + vk::SampleCountFlagBits::e1 // rasterizationSamples + // other values can be default + ); + + vk::StencilOpState stencilOpState( + vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::CompareOp::eAlways ); + 
vk::PipelineDepthStencilStateCreateInfo pipelineDepthStencilStateCreateInfo( + {}, // flags + true, // depthTestEnable + true, // depthWriteEnable + vk::CompareOp::eLessOrEqual, // depthCompareOp + false, // depthBoundTestEnable + false, // stencilTestEnable + stencilOpState, // front + stencilOpState // back + ); + + vk::ColorComponentFlags colorComponentFlags( vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | + vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA ); + vk::PipelineColorBlendAttachmentState pipelineColorBlendAttachmentState( + false, // blendEnable + vk::BlendFactor::eZero, // srcColorBlendFactor + vk::BlendFactor::eZero, // dstColorBlendFactor + vk::BlendOp::eAdd, // colorBlendOp + vk::BlendFactor::eZero, // srcAlphaBlendFactor + vk::BlendFactor::eZero, // dstAlphaBlendFactor + vk::BlendOp::eAdd, // alphaBlendOp + colorComponentFlags // colorWriteMask + ); + vk::PipelineColorBlendStateCreateInfo pipelineColorBlendStateCreateInfo( + {}, // flags + false, // logicOpEnable + vk::LogicOp::eNoOp, // logicOp + pipelineColorBlendAttachmentState, // attachments + { { 1.0f, 1.0f, 1.0f, 1.0f } } // blendConstants + ); + + std::array dynamicStates = { vk::DynamicState::eViewport, vk::DynamicState::eScissor }; + vk::PipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo( {}, dynamicStates ); + + vk::GraphicsPipelineCreateInfo graphicsPipelineCreateInfo( + {}, // flags + pipelineShaderStageCreateInfos, // stages + &pipelineVertexInputStateCreateInfo, // pVertexInputState + &pipelineInputAssemblyStateCreateInfo, // pInputAssemblyState + nullptr, // pTessellationState + &pipelineViewportStateCreateInfo, // pViewportState + &pipelineRasterizationStateCreateInfo, // pRasterizationState + &pipelineMultisampleStateCreateInfo, // pMultisampleState + &pipelineDepthStencilStateCreateInfo, // pDepthStencilState + &pipelineColorBlendStateCreateInfo, // pColorBlendState + &pipelineDynamicStateCreateInfo, // pDynamicState + 
**pipelineLayout, // layout + **renderPass // renderPass + ); + + std::shared_ptr pipeline = + vk::raii::su::make_unique( *device, nullptr, graphicsPipelineCreateInfo ); + switch ( pipeline->getConstructorSuccessCode() ) + { + case vk::Result::eSuccess: break; + case vk::Result::ePipelineCompileRequiredEXT: + // something meaningfull here + break; + default: assert( false ); // should never happen + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/14_InitPipeline/CMakeLists.txt b/RAII_Samples/14_InitPipeline/CMakeLists.txt new file mode 100644 index 0000000..1764ce6 --- /dev/null +++ b/RAII_Samples/14_InitPipeline/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_14_InitPipeline) + +set(HEADERS +) + +set(SOURCES + 14_InitPipeline.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_14_InitPipeline + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_14_InitPipeline PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_14_InitPipeline PRIVATE utils) diff --git a/RAII_Samples/15_DrawCube/15_DrawCube.cpp b/RAII_Samples/15_DrawCube/15_DrawCube.cpp new file mode 100644 index 0000000..a8fdae1 --- /dev/null +++ b/RAII_Samples/15_DrawCube/15_DrawCube.cpp @@ -0,0 +1,216 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : 15_DrawCube +// Draw a cube + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __GNUC__ ) +# if ( 9 <= __GNUC__ ) +# pragma GCC diagnostic ignored "-Winit-list-lifetime" +# endif +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" + +#include +#include + +static char const * AppName = "15_DrawCube"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + 
vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( coloredCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); + + std::unique_ptr descriptorPool = + vk::raii::su::makeUniqueDescriptorPool( *device, { { vk::DescriptorType::eUniformBuffer, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( 
*device, *descriptorPool, *descriptorSetLayout ); + vk::raii::su::updateDescriptorSets( + *device, *descriptorSet, { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, nullptr } }, {} ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + vk::su::checked_cast( sizeof( coloredCubeData[0] ) ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + /* VULKAN_KEY_START */ + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + commandBuffer->begin( {} ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 
0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/15_DrawCube/CMakeLists.txt b/RAII_Samples/15_DrawCube/CMakeLists.txt new file mode 100644 index 0000000..f168e18 --- /dev/null +++ b/RAII_Samples/15_DrawCube/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_15_DrawCube) + +set(HEADERS +) + +set(SOURCES + 15_DrawCube.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_15_DrawCube + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_15_DrawCube PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_15_DrawCube PRIVATE utils) diff --git a/RAII_Samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp b/RAII_Samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp new file mode 100644 index 0000000..609c393 --- /dev/null +++ b/RAII_Samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp @@ -0,0 +1,122 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : 16_Vulkan_1_1 +// Determine if the current system can use Vulkan 1.1 API features + +#include "../utils/utils.hpp" + +static char const * AppName = "16_Vulkan_1_1"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + /* VULKAN_KEY_START */ + + // Keep track of the major/minor version we can actually use + uint16_t usingMajorVersion = 1; + uint16_t usingMinorVersion = 0; + std::string usingVersionString = ""; + + // Set the desired version we want + uint16_t desiredMajorVersion = 1; + uint16_t desiredMinorVersion = 1; + uint32_t desiredVersion = VK_MAKE_VERSION( desiredMajorVersion, desiredMinorVersion, 0 ); + std::string desiredVersionString = ""; + desiredVersionString += std::to_string( desiredMajorVersion ); + desiredVersionString += "."; + desiredVersionString += std::to_string( desiredMinorVersion ); + + // initialize the vulkan context + std::unique_ptr context = vk::raii::su::make_unique(); + + // Determine what API version is available + uint32_t apiVersion = context->enumerateInstanceVersion(); + + // Translate the version into major/minor for easier comparison + uint32_t loader_major_version = VK_VERSION_MAJOR( apiVersion ); + uint32_t loader_minor_version = VK_VERSION_MINOR( apiVersion ); + std::cout << "Loader/Runtime support detected for Vulkan " << loader_major_version << "." 
<< loader_minor_version + << "\n"; + + // Check current version against what we want to run + if ( loader_major_version > desiredMajorVersion || + ( loader_major_version == desiredMajorVersion && loader_minor_version >= desiredMinorVersion ) ) + { + // Create the instance + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::make_unique( *instance, vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); +#endif + + // Get the list of physical devices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + // Go through the list of physical devices and select only those that are capable of running the API version we + // want. + std::vector> desiredPhysicalDevices; + for ( auto & pdh : physicalDevices ) + { + if ( desiredVersion <= pdh.getProperties().apiVersion ) + { + desiredPhysicalDevices.push_back( vk::raii::su::make_unique( std::move( pdh ) ) ); + } + } + + // If we have something in the desired version physical device list, we're good + if ( desiredPhysicalDevices.size() > 0 ) + { + usingMajorVersion = desiredMajorVersion; + usingMinorVersion = desiredMinorVersion; + } + } + + usingVersionString += std::to_string( usingMajorVersion ); + usingVersionString += "."; + usingVersionString += std::to_string( usingMinorVersion ); + + if ( usingMinorVersion < desiredMinorVersion ) + { + std::cout << "Determined that this system can only use Vulkan API version " << usingVersionString + << " instead of desired version " << desiredVersionString << std::endl; + } + else + { + std::cout << "Determined that this system can run desired Vulkan API version " << desiredVersionString + << std::endl; + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " 
<< err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/16_Vulkan_1_1/CMakeLists.txt b/RAII_Samples/16_Vulkan_1_1/CMakeLists.txt new file mode 100644 index 0000000..4759f44 --- /dev/null +++ b/RAII_Samples/16_Vulkan_1_1/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_16_Vulkan_1_1) + +set(HEADERS +) + +set(SOURCES + 16_Vulkan_1_1.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_16_Vulkan_1_1 + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_16_Vulkan_1_1 PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_16_Vulkan_1_1 PRIVATE utils) diff --git a/RAII_Samples/CMakeLists.txt b/RAII_Samples/CMakeLists.txt new file mode 100644 index 0000000..3fa6a36 --- /dev/null +++ b/RAII_Samples/CMakeLists.txt @@ -0,0 +1,56 @@ +# Copyright(c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(Vulkan-Hpp_RAIISamples) + +option (SAMPLES_BUILD_WITH_LOCAL_VULKAN_HPP "Build with local Vulkan headers" ON) +option (SAMPLES_BUILD_ONLY_DYNAMIC "Build only dynamic. Required in case the Vulkan SDK is not available" OFF) + +if(NOT (SAMPLES_BUILD_ONLY_DYNAMIC AND SAMPLES_BUILD_WITH_LOCAL_VULKAN_HPP)) + find_package(Vulkan REQUIRED) +endif() + +if(MSVC) + add_compile_options(/W4 /WX /permissive-) +else(MSVC) + add_compile_options(-Wall -Wextra -pedantic -Werror) +endif(MSVC) + +if (CMAKE_SYSTEM_NAME MATCHES "Windows") + add_definitions(-DNOMINMAX -DVK_USE_PLATFORM_WIN32_KHR) +elseif(CMAKE_SYSTEM_NAME MATCHES "Linux") + add_definitions(-DVK_USE_PLATFORM_XCB_KHR) +else() + message(FATAL_ERROR, "Vulkan-Hpp: unhandled platform for samples!") +endif() + +FILE (GLOB linkunits ${CMAKE_CURRENT_SOURCE_DIR}/*) + +if (SAMPLES_BUILD_WITH_LOCAL_VULKAN_HPP) + include_directories("${CMAKE_CURRENT_SOURCE_DIR}/..") + include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../Vulkan-Headers/include") +else() + include_directories("${Vulkan_INCLUDE_DIRS}") +endif() + +FOREACH( linkunit ${linkunits} ) + if( IS_DIRECTORY ${linkunit} ) + if( EXISTS ${linkunit}/CMakeLists.txt ) + string( REGEX REPLACE "^.*/([^/]*)$" "\\1" LINK_NAME ${linkunit} ) + add_subdirectory( ${LINK_NAME} ) + endif() + endif() +ENDFOREACH( linkunit ${linkunits} ) diff --git a/RAII_Samples/CopyBlitImage/CMakeLists.txt b/RAII_Samples/CopyBlitImage/CMakeLists.txt new file mode 100644 index 0000000..417f024 --- /dev/null +++ b/RAII_Samples/CopyBlitImage/CMakeLists.txt @@ 
-0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_CopyBlitImage) + +set(HEADERS +) + +set(SOURCES + CopyBlitImage.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_CopyBlitImage + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_CopyBlitImage PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_CopyBlitImage PRIVATE utils) diff --git a/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp b/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp new file mode 100644 index 0000000..cc355a1 --- /dev/null +++ b/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp @@ -0,0 +1,266 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : CopyBlitImage +// Draw a cube + +#include "../utils/utils.hpp" + +#include + +static char const * AppName = "CopyBlitImage"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 640, 640 ) ); + + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice->getSurfaceCapabilitiesKHR( **surfaceData.surface ); + if ( !( surfaceCapabilities.supportedUsageFlags & vk::ImageUsageFlagBits::eTransferDst ) ) + { + std::cout << "Surface cannot be destination of blit - abort \n"; + exit( -1 ); + } + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment 
| + vk::ImageUsageFlagBits::eTransferSrc | + vk::ImageUsageFlagBits::eTransferDst, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + /* VULKAN_KEY_START */ + + vk::FormatProperties formatProperties = physicalDevice->getFormatProperties( swapChainData.colorFormat ); + assert( ( formatProperties.linearTilingFeatures & vk::FormatFeatureFlagBits::eBlitSrc ) && + "Format cannot be used as transfer source" ); + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + + // Get the index of the next available swapchain image: + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + vk::raii::su::setImageLayout( *commandBuffer, + static_cast( swapChainData.images[imageIndex] ), + swapChainData.colorFormat, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eTransferDstOptimal ); + + // Create an image, map it, and write some values to the image + vk::ImageCreateInfo imageCreateInfo( {}, + vk::ImageType::e2D, + swapChainData.colorFormat, + vk::Extent3D( surfaceData.extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + vk::ImageTiling::eLinear, + vk::ImageUsageFlagBits::eTransferSrc ); + std::unique_ptr blitSourceImage = vk::raii::su::make_unique( *device, imageCreateInfo ); + + vk::PhysicalDeviceMemoryProperties memoryProperties = physicalDevice->getMemoryProperties(); + vk::MemoryRequirements memoryRequirements = blitSourceImage->getMemoryRequirements(); + uint32_t memoryTypeIndex = vk::su::findMemoryType( + memoryProperties, memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eHostVisible ); + + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); + 
std::unique_ptr deviceMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + blitSourceImage->bindMemory( **deviceMemory, 0 ); + + vk::raii::su::setImageLayout( *commandBuffer, + **blitSourceImage, + swapChainData.colorFormat, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eGeneral ); + + commandBuffer->end(); + + /* Queue the command buffer for execution */ + std::unique_ptr commandFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **commandFence ); + + /* Make sure command buffer is finished before mapping */ + while ( device->waitForFences( { **commandFence }, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) + ; + + unsigned char * pImageMemory = + static_cast( deviceMemory->mapMemory( 0, memoryRequirements.size ) ); + + // Checkerboard of 8x8 pixel squares + for ( uint32_t row = 0; row < surfaceData.extent.height; row++ ) + { + for ( uint32_t col = 0; col < surfaceData.extent.width; col++ ) + { + unsigned char rgb = ( ( ( row & 0x8 ) == 0 ) ^ ( ( col & 0x8 ) == 0 ) ) * 255; + pImageMemory[0] = rgb; + pImageMemory[1] = rgb; + pImageMemory[2] = rgb; + pImageMemory[3] = 255; + pImageMemory += 4; + } + } + + // Flush the mapped memory and then unmap it. 
Assume it isn't coherent since we didn't really confirm + vk::MappedMemoryRange mappedMemoryRange( **deviceMemory, 0, memoryRequirements.size ); + device->flushMappedMemoryRanges( mappedMemoryRange ); + deviceMemory->unmapMemory(); + + commandBuffer->reset( {} ); + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + + // Intend to blit from this image, set the layout accordingly + vk::raii::su::setImageLayout( *commandBuffer, + **blitSourceImage, + swapChainData.colorFormat, + vk::ImageLayout::eGeneral, + vk::ImageLayout::eTransferSrcOptimal ); + + vk::Image blitDestinationImage = static_cast( swapChainData.images[imageIndex] ); + + // Do a 32x32 blit to all of the dst image - should get big squares + vk::ImageSubresourceLayers imageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ); + vk::ImageBlit imageBlit( + imageSubresourceLayers, + { { vk::Offset3D( 0, 0, 0 ), vk::Offset3D( 32, 32, 1 ) } }, + imageSubresourceLayers, + { { vk::Offset3D( 0, 0, 0 ), vk::Offset3D( surfaceData.extent.width, surfaceData.extent.height, 1 ) } } ); + commandBuffer->blitImage( **blitSourceImage, + vk::ImageLayout::eTransferSrcOptimal, + blitDestinationImage, + vk::ImageLayout::eTransferDstOptimal, + imageBlit, + vk::Filter::eLinear ); + + // Use a barrier to make sure the blit is finished before the copy starts + vk::ImageMemoryBarrier memoryBarrier( vk::AccessFlagBits::eTransferWrite, + vk::AccessFlagBits::eMemoryRead, + vk::ImageLayout::eTransferDstOptimal, + vk::ImageLayout::eTransferDstOptimal, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + blitDestinationImage, + vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); + commandBuffer->pipelineBarrier( + vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, nullptr, nullptr, memoryBarrier ); + + // Do a image copy to part of the dst image - checks should stay small + vk::ImageCopy imageCopy( imageSubresourceLayers, + vk::Offset3D(), + imageSubresourceLayers, + 
vk::Offset3D( 256, 256, 0 ), + vk::Extent3D( 128, 128, 1 ) ); + commandBuffer->copyImage( **blitSourceImage, + vk::ImageLayout::eTransferSrcOptimal, + blitDestinationImage, + vk::ImageLayout::eTransferDstOptimal, + imageCopy ); + + vk::ImageMemoryBarrier prePresentBarrier( + vk::AccessFlagBits::eTransferWrite, + vk::AccessFlagBits::eMemoryRead, + vk::ImageLayout::eTransferDstOptimal, + vk::ImageLayout::ePresentSrcKHR, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + blitDestinationImage, + vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); + commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTopOfPipe, + {}, + nullptr, + nullptr, + prePresentBarrier ); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + submitInfo = vk::SubmitInfo( {}, {}, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + graphicsQueue->waitIdle(); + + /* Make sure command buffer is finished before presenting */ + while ( device->waitForFences( { **drawFence }, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) + ; + + /* Now present the image in the window */ + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/CreateDebugUtilsMessenger/CMakeLists.txt b/RAII_Samples/CreateDebugUtilsMessenger/CMakeLists.txt new file mode 100644 index 0000000..5be9d9c --- /dev/null +++ b/RAII_Samples/CreateDebugUtilsMessenger/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_CreateDebugUtilsMessenger) + +set(HEADERS +) + +set(SOURCES + CreateDebugUtilsMessenger.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_CreateDebugUtilsMessenger + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_CreateDebugUtilsMessenger PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_CreateDebugUtilsMessenger PRIVATE utils) diff --git a/RAII_Samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp b/RAII_Samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp new file mode 100644 index 0000000..234e6f6 --- /dev/null +++ b/RAII_Samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp @@ -0,0 +1,144 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : CreateDebugReportMessenger +// Draw a cube + +#include "../utils/utils.hpp" + +#include +#include + +static char const * AppName = "CreateDebugReportMessenger"; +static char const * EngineName = "Vulkan.hpp"; + +VKAPI_ATTR VkBool32 VKAPI_CALL debugMessageFunc( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData, + void * /*pUserData*/ ) +{ + std::ostringstream message; + + message << vk::to_string( static_cast( messageSeverity ) ) << ": " + << vk::to_string( static_cast( messageTypes ) ) << ":\n"; + message << "\t" + << "messageIDName = <" << pCallbackData->pMessageIdName << ">\n"; + message << "\t" + << "messageIdNumber = " << pCallbackData->messageIdNumber << "\n"; + message << "\t" + << "message = <" << pCallbackData->pMessage << ">\n"; + if ( 0 < pCallbackData->queueLabelCount ) + { + message << "\t" + << "Queue Labels:\n"; + for ( uint8_t i = 0; i < pCallbackData->queueLabelCount; i++ ) + { + message << "\t\t" + << "labelName = <" << pCallbackData->pQueueLabels[i].pLabelName << ">\n"; + } + } + if ( 0 < pCallbackData->cmdBufLabelCount ) + { + message << "\t" + << "CommandBuffer Labels:\n"; + for ( uint8_t i = 0; i < pCallbackData->cmdBufLabelCount; i++ ) + { + message << "\t\t" + << "labelName = <" << pCallbackData->pCmdBufLabels[i].pLabelName << ">\n"; + } + } + if ( 0 < pCallbackData->objectCount ) + { + message << "\t" + << "Objects:\n"; + for ( uint8_t i = 0; i < pCallbackData->objectCount; i++ ) + { 
+ message << "\t\t" + << "Object " << i << "\n"; + message << "\t\t\t" + << "objectType = " + << vk::to_string( static_cast( pCallbackData->pObjects[i].objectType ) ) << "\n"; + message << "\t\t\t" + << "objectHandle = " << pCallbackData->pObjects[i].objectHandle << "\n"; + if ( pCallbackData->pObjects[i].pObjectName ) + { + message << "\t\t\t" + << "objectName = <" << pCallbackData->pObjects[i].pObjectName << ">\n"; + } + } + } + +#ifdef _WIN32 + MessageBox( NULL, message.str().c_str(), "Alert", MB_OK ); +#else + std::cout << message.str() << std::endl; +#endif + + return false; +} + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + /* VULKAN_KEY_START */ + + std::vector props = context->enumerateInstanceExtensionProperties(); + + auto propsIterator = std::find_if( props.begin(), props.end(), []( vk::ExtensionProperties const & ep ) { + return strcmp( ep.extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME ) == 0; + } ); + if ( propsIterator == props.end() ) + { + std::cout << "Something went very wrong, cannot find " << VK_EXT_DEBUG_UTILS_EXTENSION_NAME << " extension" + << std::endl; + exit( 1 ); + } + + vk::ApplicationInfo applicationInfo( AppName, 1, EngineName, 1, VK_API_VERSION_1_1 ); + const char * extensionName = VK_EXT_DEBUG_UTILS_EXTENSION_NAME; + vk::InstanceCreateInfo instanceCreateInfo( {}, &applicationInfo, {}, extensionName ); + std::unique_ptr instance = vk::raii::su::make_unique( *context, instanceCreateInfo ); + + vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eError ); + vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | + vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); + vk::DebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfoEXT( + {}, severityFlags, 
messageTypeFlags, &debugMessageFunc ); + std::unique_ptr debugUtilsMessenger = + vk::raii::su::make_unique( *instance, debugUtilsMessengerCreateInfoEXT ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/DebugUtilsObjectName/CMakeLists.txt b/RAII_Samples/DebugUtilsObjectName/CMakeLists.txt new file mode 100644 index 0000000..a4f1480 --- /dev/null +++ b/RAII_Samples/DebugUtilsObjectName/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_DebugUtilsObjectName) + +set(HEADERS +) + +set(SOURCES + DebugUtilsObjectName.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_DebugUtilsObjectName + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_DebugUtilsObjectName PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_DebugUtilsObjectName PRIVATE utils) diff --git a/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp b/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp new file mode 100644 index 0000000..60edddd --- /dev/null +++ b/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp @@ -0,0 +1,75 @@ +// Copyright(c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : DebugUtilsObjectName +// Demonstrate usage of DebugUtilsObjectName + +#include "../utils/utils.hpp" + +static char const * AppName = "DebugUtilsObjectName"; +static char const * EngineName = "Vulkan.hpp"; + +#if defined( _MSC_VER ) && !defined( _WIN64 ) +# define NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( type, x ) static_cast( x ) +#else +# define NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( type, x ) reinterpret_cast( static_cast( x ) ) +#endif + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + vk::raii::PhysicalDevices physicalDevices( *instance ); + assert( !physicalDevices.empty() ); + + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevices[0].getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( physicalDevices[0], graphicsQueueFamilyIndex ); + + // create an image + std::unique_ptr image = vk::raii::su::makeUniqueImage( *device ); + + /* VULKAN_KEY_START */ + + vk::DebugUtilsObjectNameInfoEXT debugUtilsObjectNameInfo( + vk::ObjectType::eImage, NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( VkImage, **image ), "Image name" ); + device->setDebugUtilsObjectNameEXT( debugUtilsObjectNameInfo ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/DrawTexturedCube/CMakeLists.txt b/RAII_Samples/DrawTexturedCube/CMakeLists.txt new file mode 100644 index 0000000..5075ab2 --- /dev/null +++ b/RAII_Samples/DrawTexturedCube/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_DrawTexturedCube) + +set(HEADERS +) + +set(SOURCES + DrawTexturedCube.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_DrawTexturedCube + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_DrawTexturedCube PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_DrawTexturedCube PRIVATE utils) diff --git a/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp b/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp new file mode 100644 index 0000000..b02f8de --- /dev/null +++ b/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp @@ -0,0 +1,214 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : DrawTexturedCube +// Draw a textured cube + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "DrawTexturedCube"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( 
*device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::CheckerboardImageGenerator() ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + 
vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + std::unique_ptr descriptorPool = vk::raii::su::makeUniqueDescriptorPool( + *device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( *device, + *descriptorSet, + { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, nullptr } }, + { textureData } ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + /* VULKAN_KEY_START */ + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < 
swapChainData.images.size() ); + + // commandBuffer->begin() has already been called above! + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: 
assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/DynamicUniform/CMakeLists.txt b/RAII_Samples/DynamicUniform/CMakeLists.txt new file mode 100644 index 0000000..1a622a7 --- /dev/null +++ b/RAII_Samples/DynamicUniform/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_DynamicUniform) + +set(HEADERS +) + +set(SOURCES + DynamicUniform.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_DynamicUniform + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_DynamicUniform PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_DynamicUniform PRIVATE utils) diff --git a/RAII_Samples/DynamicUniform/DynamicUniform.cpp b/RAII_Samples/DynamicUniform/DynamicUniform.cpp new file mode 100644 index 0000000..23bfd33 --- /dev/null +++ b/RAII_Samples/DynamicUniform/DynamicUniform.cpp @@ -0,0 +1,254 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : DynamicUniform +// Draw 2 Cubes using dynamic uniform buffer + +#if defined( _MSC_VER ) +# pragma warning( disable : 4127 ) // conditional expression is constant +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "DynamicUniform"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + 
vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( coloredCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); + + /* VULKAN_KEY_START */ + + vk::PhysicalDeviceLimits limits = physicalDevice->getProperties().limits; + if ( limits.maxDescriptorSetUniformBuffersDynamic < 1 ) + { + std::cout << "No dynamic uniform buffers supported\n"; + exit( -1 ); + } + + /* Set up uniform buffer with 2 transform matrices in it */ + glm::mat4x4 mvpcs[2]; + glm::mat4x4 model = glm::mat4x4( 1.0f ); + glm::mat4x4 view = + glm::lookAt( glm::vec3( 0.0f, 3.0f, -10.0f ), glm::vec3( 0.0f, 0.0f, 0.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ) ); + glm::mat4x4 projection = glm::perspective( glm::radians( 45.0f ), 1.0f, 0.1f, 100.0f ); + // clang-format off + 
glm::mat4x4 clip = glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, -1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.5f, 0.0f, + 0.0f, 0.0f, 0.5f, 1.0f ); // vulkan clip space has inverted y and half z ! + // clang-format on + mvpcs[0] = clip * projection * view * model; + + model = glm::translate( model, glm::vec3( -1.5f, 1.5f, -1.5f ) ); + mvpcs[1] = clip * projection * view * model; + + vk::DeviceSize bufferSize = sizeof( glm::mat4x4 ); + if ( limits.minUniformBufferOffsetAlignment ) + { + bufferSize = + ( bufferSize + limits.minUniformBufferOffsetAlignment - 1 ) & ~( limits.minUniformBufferOffsetAlignment - 1 ); + } + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, 2 * bufferSize, vk::BufferUsageFlagBits::eUniformBuffer ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcs, 2, bufferSize ); + + // create a DescriptorSetLayout with vk::DescriptorType::eUniformBufferDynamic + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformBufferDynamic, 1, vk::ShaderStageFlagBits::eVertex } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + // create a DescriptorPool with vk::DescriptorType::eUniformBufferDynamic + std::unique_ptr descriptorPool = + vk::raii::su::makeUniqueDescriptorPool( *device, { { vk::DescriptorType::eUniformBufferDynamic, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( + *device, + *descriptorSet, + { { vk::DescriptorType::eUniformBufferDynamic, *uniformBufferData.buffer, nullptr } }, + {} ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + 
nullptr, + sizeof( coloredCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + commandBuffer->begin( {} ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + /* The first draw should use the first matrix in the buffer */ + uint32_t dynamicOffset = 0; + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, dynamicOffset ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + + // the second draw should use the second matrix in the buffer; + dynamicOffset = (uint32_t)bufferSize; + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, 
**pipelineLayout, 0, { **descriptorSet }, dynamicOffset ); + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/EnableValidationWithCallback/CMakeLists.txt b/RAII_Samples/EnableValidationWithCallback/CMakeLists.txt new file mode 100644 index 0000000..0f3eb7a --- /dev/null +++ b/RAII_Samples/EnableValidationWithCallback/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_EnableValidationWithCallback) + +set(HEADERS +) + +set(SOURCES + EnableValidationWithCallback.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_EnableValidationWithCallback + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_EnableValidationWithCallback PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_EnableValidationWithCallback PRIVATE utils) diff --git a/RAII_Samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp b/RAII_Samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp new file mode 100644 index 0000000..a08a2ed --- /dev/null +++ b/RAII_Samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp @@ -0,0 +1,211 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : EnableValidationWithCallback +// Show how to enable validation layers and provide callback + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wunused-variable" +#elif defined( __GNUC__ ) +# pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../utils/utils.hpp" +#include "vulkan/vulkan_raii.hpp" + +#include +#include +#include + +static char const * AppName = "EnableValidationWithCallback"; +static char const * EngineName = "Vulkan.hpp"; + +PFN_vkCreateDebugUtilsMessengerEXT pfnVkCreateDebugUtilsMessengerEXT; +PFN_vkDestroyDebugUtilsMessengerEXT pfnVkDestroyDebugUtilsMessengerEXT; + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT * pCreateInfo, + const VkAllocationCallbacks * pAllocator, + VkDebugUtilsMessengerEXT * pMessenger ) +{ + return pfnVkCreateDebugUtilsMessengerEXT( instance, pCreateInfo, pAllocator, pMessenger ); +} + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + VkAllocationCallbacks const * pAllocator ) +{ + return pfnVkDestroyDebugUtilsMessengerEXT( instance, messenger, pAllocator ); +} + +VKAPI_ATTR VkBool32 VKAPI_CALL debugMessageFunc( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData, + void * /*pUserData*/ ) +{ + std::string message; + + message += vk::to_string( static_cast( messageSeverity ) ) + ": " + + vk::to_string( static_cast( messageTypes ) ) + ":\n"; + message += std::string( "\t" ) + "messageIDName = <" + pCallbackData->pMessageIdName + ">\n"; + message += std::string( "\t" ) + "messageIdNumber = " + std::to_string( pCallbackData->messageIdNumber ) + "\n"; + 
message += std::string( "\t" ) + "message = <" + pCallbackData->pMessage + ">\n"; + if ( 0 < pCallbackData->queueLabelCount ) + { + message += std::string( "\t" ) + "Queue Labels:\n"; + for ( uint8_t i = 0; i < pCallbackData->queueLabelCount; i++ ) + { + message += std::string( "\t\t" ) + "labelName = <" + pCallbackData->pQueueLabels[i].pLabelName + ">\n"; + } + } + if ( 0 < pCallbackData->cmdBufLabelCount ) + { + message += std::string( "\t" ) + "CommandBuffer Labels:\n"; + for ( uint8_t i = 0; i < pCallbackData->cmdBufLabelCount; i++ ) + { + message += std::string( "\t\t" ) + "labelName = <" + pCallbackData->pCmdBufLabels[i].pLabelName + ">\n"; + } + } + if ( 0 < pCallbackData->objectCount ) + { + for ( uint8_t i = 0; i < pCallbackData->objectCount; i++ ) + { + message += std::string( "\t" ) + "Object " + std::to_string( i ) + "\n"; + message += std::string( "\t\t" ) + "objectType = " + + vk::to_string( static_cast( pCallbackData->pObjects[i].objectType ) ) + "\n"; + message += + std::string( "\t\t" ) + "objectHandle = " + std::to_string( pCallbackData->pObjects[i].objectHandle ) + "\n"; + if ( pCallbackData->pObjects[i].pObjectName ) + { + message += std::string( "\t\t" ) + "objectName = <" + pCallbackData->pObjects[i].pObjectName + ">\n"; + } + } + } + +#ifdef _WIN32 + MessageBox( NULL, message.c_str(), "Alert", MB_OK ); +#else + std::cout << message << std::endl; +#endif + + return false; +} + +bool checkLayers( std::vector const & layers, std::vector const & properties ) +{ + // return true if all layers are listed in the properties + return std::all_of( layers.begin(), layers.end(), [&properties]( char const * name ) { + return std::find_if( properties.begin(), properties.end(), [&name]( vk::LayerProperties const & property ) { + return strcmp( property.layerName, name ) == 0; + } ) != properties.end(); + } ); +} + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + std::vector 
instanceLayerProperties = context->enumerateInstanceLayerProperties(); + + /* VULKAN_KEY_START */ + + // Use standard_validation meta layer that enables all recommended validation layers + std::vector instanceLayerNames; + instanceLayerNames.push_back( "VK_LAYER_KHRONOS_validation" ); + if ( !checkLayers( instanceLayerNames, instanceLayerProperties ) ) + { + std::cout << "Set the environment variable VK_LAYER_PATH to point to the location of your layers" << std::endl; + exit( 1 ); + } + + /* Enable debug callback extension */ + std::vector instanceExtensionNames; + instanceExtensionNames.push_back( VK_EXT_DEBUG_UTILS_EXTENSION_NAME ); + + vk::ApplicationInfo applicationInfo( AppName, 1, EngineName, 1, VK_API_VERSION_1_1 ); + vk::InstanceCreateInfo instanceCreateInfo( {}, &applicationInfo, instanceLayerNames, instanceExtensionNames ); + std::unique_ptr instance = vk::raii::su::make_unique( *context, instanceCreateInfo ); + + pfnVkCreateDebugUtilsMessengerEXT = + reinterpret_cast( instance->getProcAddr( "vkCreateDebugUtilsMessengerEXT" ) ); + if ( !pfnVkCreateDebugUtilsMessengerEXT ) + { + std::cout << "GetInstanceProcAddr: Unable to find pfnVkCreateDebugUtilsMessengerEXT function." << std::endl; + exit( 1 ); + } + + pfnVkDestroyDebugUtilsMessengerEXT = reinterpret_cast( + instance->getProcAddr( "vkDestroyDebugUtilsMessengerEXT" ) ); + if ( !pfnVkDestroyDebugUtilsMessengerEXT ) + { + std::cout << "GetInstanceProcAddr: Unable to find pfnVkDestroyDebugUtilsMessengerEXT function." 
<< std::endl; + exit( 1 ); + } + + vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eError ); + vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | + vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); + vk::DebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfoEXT( + {}, severityFlags, messageTypeFlags, &debugMessageFunc ); + std::unique_ptr debugUtilsMessenger = + vk::raii::su::make_unique( *instance, debugUtilsMessengerCreateInfoEXT ); + + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + // get the index of the first queue family that supports graphics + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( {}, graphicsQueueFamilyIndex, 1, &queuePriority ); + vk::DeviceCreateInfo deviceCreateInfo( {}, deviceQueueCreateInfo ); + std::unique_ptr device = vk::raii::su::make_unique( *physicalDevice, deviceCreateInfo ); + + // Create a vk::CommandPool (not a vk::raii::CommandPool, for testing purposes!) + vk::CommandPoolCreateInfo commandPoolCreateInfo( {}, graphicsQueueFamilyIndex ); + vk::CommandPool commandPool = + ( **device ).createCommandPool( commandPoolCreateInfo, nullptr, *device->getDispatcher() ); + + // The commandPool is not destroyed automatically (as it's not a UniqueCommandPool. + // That is, the device is destroyed before the commmand pool and will trigger a validation error. 
+ std::cout << "*** INTENTIONALLY destroying the Device before destroying a CommandPool ***\n"; + std::cout << "*** The following error message is EXPECTED ***\n"; + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/EnumerateDevicesAdvanced/CMakeLists.txt b/RAII_Samples/EnumerateDevicesAdvanced/CMakeLists.txt new file mode 100644 index 0000000..1c2f7b4 --- /dev/null +++ b/RAII_Samples/EnumerateDevicesAdvanced/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_EnumerateDevicesAdvanced) + +set(HEADERS +) + +set(SOURCES + EnumerateDevicesAdvanced.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_EnumerateDevicesAdvanced + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_EnumerateDevicesAdvanced PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_EnumerateDevicesAdvanced PRIVATE utils) diff --git a/RAII_Samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp b/RAII_Samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp new file mode 100644 index 0000000..115de41 --- /dev/null +++ b/RAII_Samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp @@ -0,0 +1,86 @@ +// Copyright(c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : EnumerateDevicesAdvanced +// Enumerate physical devices + +#include "../utils/utils.hpp" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "EnumerateDevicesAdvanced"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + /* VULKAN_HPP_KEY_START */ + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + for ( auto const & pdh : physicalDevices ) + { + vk::PhysicalDeviceProperties properties = pdh.getProperties(); + + std::cout << "apiVersion: "; + std::cout << ( ( properties.apiVersion >> 22 ) & 0xfff ) << '.'; // Major. + std::cout << ( ( properties.apiVersion >> 12 ) & 0x3ff ) << '.'; // Minor. + std::cout << ( properties.apiVersion & 0xfff ); // Patch. 
+ std::cout << '\n'; + + std::cout << "driverVersion: " << properties.driverVersion << '\n'; + + std::cout << std::showbase << std::internal << std::setfill( '0' ) << std::hex; + std::cout << "vendorId: " << std::setw( 6 ) << properties.vendorID << '\n'; + std::cout << "deviceId: " << std::setw( 6 ) << properties.deviceID << '\n'; + std::cout << std::noshowbase << std::right << std::setfill( ' ' ) << std::dec; + + std::cout << "deviceType: " << vk::to_string( properties.deviceType ) << "\n"; + + std::cout << "deviceName: " << properties.deviceName << '\n'; + + std::cout << "pipelineCacheUUID: " << vk::su::UUID( properties.pipelineCacheUUID ) << "\n\n"; + } + + /* VULKAN_HPP_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/Events/CMakeLists.txt b/RAII_Samples/Events/CMakeLists.txt new file mode 100644 index 0000000..e1ba10f --- /dev/null +++ b/RAII_Samples/Events/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_Events) + +set(HEADERS +) + +set(SOURCES + Events.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_Events + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_Events PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_Events PRIVATE utils) diff --git a/RAII_Samples/Events/Events.cpp b/RAII_Samples/Events/Events.cpp new file mode 100644 index 0000000..cc94807 --- /dev/null +++ b/RAII_Samples/Events/Events.cpp @@ -0,0 +1,162 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : Events +// Use basic events + +#include "../utils/utils.hpp" +#include "vulkan/vulkan_raii.hpp" + +#include + +static char const * AppName = "Events"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice->getQueueFamilyProperties() ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsQueueFamilyIndex, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsQueueFamilyIndex ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsQueueFamilyIndex, 0 ); + + /* VULKAN_KEY_START */ + + // Start with a trivial command buffer and make sure fence wait doesn't time out + commandBuffer->begin( {} ); + commandBuffer->setViewport( 0, vk::Viewport( 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 1.0f ) ); + commandBuffer->end(); + + std::unique_ptr fence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::SubmitInfo submitInfo( {}, {}, **commandBuffer ); + graphicsQueue->submit( submitInfo, **fence ); + + // Make sure timeout is long enough for a simple command buffer without waiting for an event + vk::Result result; + int timeouts = -1; + do + { + result = device->waitForFences( { **fence }, true, vk::su::FenceTimeout ); + timeouts++; + } while ( result == 
vk::Result::eTimeout ); + assert( result == vk::Result::eSuccess ); + if ( timeouts != 0 ) + { + std::cout << "Unsuitable timeout value, exiting\n"; + exit( -1 ); + } + + // Now create an event and wait for it on the GPU + std::unique_ptr event = vk::raii::su::make_unique( *device, vk::EventCreateInfo() ); + + commandBuffer->reset( vk::CommandBufferResetFlags() ); + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer->waitEvents( { **event }, + vk::PipelineStageFlagBits::eHost, + vk::PipelineStageFlagBits::eBottomOfPipe, + nullptr, + nullptr, + nullptr ); + commandBuffer->end(); + device->resetFences( { **fence } ); + + // Note that stepping through this code in the debugger is a bad idea because the GPU can TDR waiting for the event. + // Execute the code from vk::Queue::submit() through vk::Device::setEvent() without breakpoints + graphicsQueue->submit( submitInfo, **fence ); + + // We should timeout waiting for the fence because the GPU should be waiting on the event + result = device->waitForFences( { **fence }, true, vk::su::FenceTimeout ); + if ( result != vk::Result::eTimeout ) + { + std::cout << "Didn't get expected timeout in vk::Device::waitForFences, exiting\n"; + exit( -1 ); + } + + // Set the event from the CPU and wait for the fence. + // This should succeed since we set the event + event->set(); + do + { + result = device->waitForFences( { **fence }, true, vk::su::FenceTimeout ); + } while ( result == vk::Result::eTimeout ); + assert( result == vk::Result::eSuccess ); + + commandBuffer->reset( {} ); + device->resetFences( { **fence } ); + event->reset(); + + // Now set the event from the GPU and wait on the CPU + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer->setEvent( **event, vk::PipelineStageFlagBits::eBottomOfPipe ); + commandBuffer->end(); + + // Look for the event on the CPU. It should be vk::Result::eEventReset since we haven't sent the command buffer yet. 
+ result = event->getStatus(); + assert( result == vk::Result::eEventReset ); + + // Send the command buffer and loop waiting for the event + graphicsQueue->submit( submitInfo, **fence ); + + int polls = 0; + do + { + result = event->getStatus(); + polls++; + } while ( result != vk::Result::eEventSet ); + printf( "%d polls to find the event set\n", polls ); + + do + { + result = device->waitForFences( { **fence }, true, vk::su::FenceTimeout ); + } while ( result == vk::Result::eTimeout ); + assert( result == vk::Result::eSuccess ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/ImmutableSampler/CMakeLists.txt b/RAII_Samples/ImmutableSampler/CMakeLists.txt new file mode 100644 index 0000000..b8eee7f --- /dev/null +++ b/RAII_Samples/ImmutableSampler/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_ImmutableSampler) + +set(HEADERS +) + +set(SOURCES + ImmutableSampler.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_ImmutableSampler + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_ImmutableSampler PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_ImmutableSampler PRIVATE utils) diff --git a/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp b/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp new file mode 100644 index 0000000..5fdd09d --- /dev/null +++ b/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp @@ -0,0 +1,238 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : ImmutableSampler +// Use an immutable sampler to texture a cube. + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +# if ( 9 <= __GNUC__ ) +# pragma GCC diagnostic ignored "-Winit-list-lifetime" +# endif +#else +// unknown compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "ImmutableSampler"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + 
graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + /* VULKAN_KEY_START */ + + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::CheckerboardImageGenerator() ); + + std::array bindings = { + vk::DescriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ), + vk::DescriptorSetLayoutBinding( + 1, 
vk::DescriptorType::eCombinedImageSampler, vk::ShaderStageFlagBits::eFragment, **textureData.sampler ) + }; + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( {}, bindings ); + std::unique_ptr descriptorSetLayout = + vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + + // Create pipeline layout + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, **descriptorSetLayout ); + std::unique_ptr pipelineLayout = + vk::raii::su::make_unique( *device, pipelineLayoutCreateInfo ); + + // Create a single pool to contain data for our descriptor set + std::array poolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), + vk::DescriptorPoolSize( + vk::DescriptorType::eCombinedImageSampler, 1 ) }; + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSizes ); + std::unique_ptr descriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + + // Populate descriptor sets + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, **descriptorSetLayout ); + std::unique_ptr descriptorSet = vk::raii::su::make_unique( + std::move( vk::raii::DescriptorSets( *device, descriptorSetAllocateInfo ).front() ) ); + + vk::DescriptorBufferInfo bufferInfo( **uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( + **textureData.sampler, **textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + std::array writeDescriptorSets = { + vk::WriteDescriptorSet( **descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( **descriptorSet, 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) + }; + device->updateDescriptorSets( writeDescriptorSets, nullptr ); + + /* VULKAN_KEY_END */ + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = 
vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + vk::raii::su::submitAndWait( *device, *graphicsQueue, *commandBuffer ); + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = 
presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/InitTexture/CMakeLists.txt b/RAII_Samples/InitTexture/CMakeLists.txt new file mode 100644 index 0000000..ca68361 --- /dev/null +++ b/RAII_Samples/InitTexture/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_InitTexture) + +set(HEADERS +) + +set(SOURCES + InitTexture.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_InitTexture + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_InitTexture PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_InitTexture PRIVATE utils) diff --git a/RAII_Samples/InitTexture/InitTexture.cpp b/RAII_Samples/InitTexture/InitTexture.cpp new file mode 100644 index 0000000..1d94ca2 --- /dev/null +++ b/RAII_Samples/InitTexture/InitTexture.cpp @@ -0,0 +1,230 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : InitTexture +// Initialize texture + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wunused-variable" +#elif defined( __GNUC__ ) +# pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#else +// unknown compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../samples/utils/geometries.hpp" +#include "../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include + +static char const * AppName = "InitTexture"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 50, 50 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + /* VULKAN_KEY_START */ + + vk::Format format = vk::Format::eR8G8B8A8Unorm; + vk::FormatProperties formatProperties = physicalDevice->getFormatProperties( format ); + + // See if we can use a linear tiled image for a texture, if not, we will need a staging buffer for the texture data + bool 
needsStaging = !( formatProperties.linearTilingFeatures & vk::FormatFeatureFlagBits::eSampledImage ); + + vk::ImageCreateInfo imageCreateInfo( + {}, + vk::ImageType::e2D, + format, + vk::Extent3D( surfaceData.extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + needsStaging ? vk::ImageTiling::eOptimal : vk::ImageTiling::eLinear, + vk::ImageUsageFlagBits::eSampled | + ( needsStaging ? vk::ImageUsageFlagBits::eTransferDst : vk::ImageUsageFlagBits() ), + vk::SharingMode::eExclusive, + {}, + needsStaging ? vk::ImageLayout::eUndefined : vk::ImageLayout::ePreinitialized ); + std::unique_ptr image = vk::raii::su::make_unique( *device, imageCreateInfo ); + + vk::MemoryRequirements memoryRequirements = image->getMemoryRequirements(); + uint32_t memoryTypeIndex = vk::su::findMemoryType( + physicalDevice->getMemoryProperties(), + memoryRequirements.memoryTypeBits, + needsStaging ? vk::MemoryPropertyFlags() + : ( vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ) ); + + // allocate memory + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); + std::unique_ptr imageMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + + // bind memory + image->bindMemory( **imageMemory, 0 ); + + std::unique_ptr textureBuffer; + std::unique_ptr textureBufferMemory; + if ( needsStaging ) + { + // Need a staging buffer to map and copy texture into + vk::BufferCreateInfo bufferCreateInfo( + {}, surfaceData.extent.width * surfaceData.extent.height * 4, vk::BufferUsageFlagBits::eTransferSrc ); + textureBuffer = vk::raii::su::make_unique( *device, bufferCreateInfo ); + + memoryRequirements = textureBuffer->getMemoryRequirements(); + memoryTypeIndex = + vk::su::findMemoryType( physicalDevice->getMemoryProperties(), + memoryRequirements.memoryTypeBits, + vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); + + // allocate memory + memoryAllocateInfo = vk::MemoryAllocateInfo( 
memoryRequirements.size, memoryTypeIndex ); + textureBufferMemory = vk::raii::su::make_unique( *device, memoryAllocateInfo ); + + // bind memory + textureBuffer->bindMemory( **textureBufferMemory, 0 ); + } + else + { + vk::SubresourceLayout subresourceLayout = + image->getSubresourceLayout( vk::ImageSubresource( vk::ImageAspectFlagBits::eColor ) ); + } + + void * data = needsStaging ? textureBufferMemory->mapMemory( 0, memoryRequirements.size, vk::MemoryMapFlags() ) + : imageMemory->mapMemory( 0, memoryRequirements.size, vk::MemoryMapFlags() ); + + // Checkerboard of 16x16 pixel squares + unsigned char * pImageMemory = static_cast( data ); + for ( uint32_t row = 0; row < surfaceData.extent.height; row++ ) + { + for ( uint32_t col = 0; col < surfaceData.extent.width; col++ ) + { + unsigned char rgb = ( ( ( row & 0x10 ) == 0 ) ^ ( ( col & 0x10 ) == 0 ) ) * 255; + pImageMemory[0] = rgb; + pImageMemory[1] = rgb; + pImageMemory[2] = rgb; + pImageMemory[3] = 255; + pImageMemory += 4; + } + } + + needsStaging ? 
textureBufferMemory->unmapMemory() : imageMemory->unmapMemory(); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + if ( needsStaging ) + { + // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal + vk::raii::su::setImageLayout( + *commandBuffer, **image, format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); + vk::BufferImageCopy copyRegion( 0, + surfaceData.extent.width, + surfaceData.extent.height, + vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), + vk::Offset3D( 0, 0, 0 ), + vk::Extent3D( surfaceData.extent, 1 ) ); + commandBuffer->copyBufferToImage( **textureBuffer, **image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); + // Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY + vk::raii::su::setImageLayout( *commandBuffer, + **image, + format, + vk::ImageLayout::eTransferDstOptimal, + vk::ImageLayout::eShaderReadOnlyOptimal ); + } + else + { + // If we can use the linear tiled image as a texture, just do it + vk::raii::su::setImageLayout( + *commandBuffer, **image, format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); + } + + commandBuffer->end(); + vk::raii::su::submitAndWait( *device, *graphicsQueue, *commandBuffer ); + + vk::SamplerCreateInfo samplerCreateInfo( {}, + vk::Filter::eNearest, + vk::Filter::eNearest, + vk::SamplerMipmapMode::eNearest, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + 0.0f, + false, + 1.0f, + false, + vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueWhite ); + std::unique_ptr sampler = vk::raii::su::make_unique( *device, samplerCreateInfo ); + + vk::ComponentMapping componentMapping( + vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); + vk::ImageSubresourceRange imageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); 
+ vk::ImageViewCreateInfo imageViewCreateInfo( + {}, **image, vk::ImageViewType::e2D, format, componentMapping, imageSubresourceRange ); + std::unique_ptr imageView = + vk::raii::su::make_unique( *device, imageViewCreateInfo ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/InputAttachment/CMakeLists.txt b/RAII_Samples/InputAttachment/CMakeLists.txt new file mode 100644 index 0000000..3de3a3c --- /dev/null +++ b/RAII_Samples/InputAttachment/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_InputAttachment) + +set(HEADERS +) + +set(SOURCES + InputAttachment.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_InputAttachment + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_InputAttachment PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_InputAttachment PRIVATE utils) diff --git a/RAII_Samples/InputAttachment/InputAttachment.cpp b/RAII_Samples/InputAttachment/InputAttachment.cpp new file mode 100644 index 0000000..de0c2a6 --- /dev/null +++ b/RAII_Samples/InputAttachment/InputAttachment.cpp @@ -0,0 +1,319 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : InputAttachment +// Use an input attachment to draw a yellow triangle + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +// no need to ignore any warnings with GCC +#else +// unknown compiler...
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "InputAttachment"; +static char const * EngineName = "Vulkan.hpp"; + +static std::string vertexShaderText = R"( +#version 450 + +vec2 vertices[3]; + +void main() +{ + vertices[0] = vec2(-1.0f, -1.0f); + vertices[1] = vec2( 1.0f, -1.0f); + vertices[2] = vec2( 0.0f, 1.0f); + + gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0f, 1.0f); +} +)"; + +// Use subpassLoad to read from input attachment +static const char * fragmentShaderText = R"( +#version 450 + +layout (input_attachment_index = 0, set = 0, binding = 0) uniform subpassInput inputAttachment; + +layout (location = 0) out vec4 outColor; + +void main() +{ + outColor = subpassLoad(inputAttachment); +} +)"; +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::FormatProperties formatProperties = physicalDevice->getFormatProperties( vk::Format::eR8G8B8A8Unorm ); + if ( !( formatProperties.optimalTilingFeatures & vk::FormatFeatureFlagBits::eColorAttachment ) ) + { + std::cout << "vk::Format::eR8G8B8A8Unorm format unsupported for input attachment\n"; + exit( -1 ); + } + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( 
*physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + /* VULKAN_KEY_START */ + + // Create a framebuffer with 2 attachments, one the color attachment the shaders render into, and the other an input + // attachment which will be cleared to yellow, and then used by the shaders to color the drawn triangle. 
Final + // result should be a yellow triangle + + // Create the image that will be used as the input attachment + // The image for the color attachment is the presentable image already created as part of the SwapChainData + vk::ImageCreateInfo imageCreateInfo( {}, + vk::ImageType::e2D, + swapChainData.colorFormat, + vk::Extent3D( surfaceData.extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eInputAttachment | + vk::ImageUsageFlagBits::eTransferDst ); + std::unique_ptr inputImage = vk::raii::su::make_unique( *device, imageCreateInfo ); + + vk::MemoryRequirements memoryRequirements = inputImage->getMemoryRequirements(); + uint32_t memoryTypeIndex = + vk::su::findMemoryType( physicalDevice->getMemoryProperties(), memoryRequirements.memoryTypeBits, {} ); + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); + std::unique_ptr inputMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + inputImage->bindMemory( **inputMemory, 0 ); + + // Set the image layout to TRANSFER_DST_OPTIMAL to be ready for clear + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + vk::raii::su::setImageLayout( *commandBuffer, + **inputImage, + swapChainData.colorFormat, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eTransferDstOptimal ); + + vk::ClearColorValue clearColorValue( std::array( { { 1.0f, 1.0f, 0.0f, 0.0f } } ) ); + vk::ImageSubresourceRange imageSubresourceRange( + vk::ImageAspectFlagBits::eColor, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS ); + commandBuffer->clearColorImage( + **inputImage, vk::ImageLayout::eTransferDstOptimal, clearColorValue, imageSubresourceRange ); + + // Set the image layout to SHADER_READONLY_OPTIMAL for use by the shaders + vk::raii::su::setImageLayout( *commandBuffer, + **inputImage, + swapChainData.colorFormat, + vk::ImageLayout::eTransferDstOptimal, + vk::ImageLayout::eShaderReadOnlyOptimal ); + + vk::ComponentMapping 
componentMapping( + vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); + imageSubresourceRange = vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, **inputImage, vk::ImageViewType::e2D, swapChainData.colorFormat, componentMapping, imageSubresourceRange ); + std::unique_ptr inputAttachmentView = + vk::raii::su::make_unique( *device, imageViewCreateInfo ); + + vk::DescriptorSetLayoutBinding layoutBinding( + 0, vk::DescriptorType::eInputAttachment, 1, vk::ShaderStageFlagBits::eFragment ); + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( {}, layoutBinding ); + std::unique_ptr descriptorSetLayout = + vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, **descriptorSetLayout ); + std::unique_ptr pipelineLayout = + vk::raii::su::make_unique( *device, pipelineLayoutCreateInfo ); + + std::array attachments = { + // First attachment is the color attachment - clear at the beginning of the renderpass and transition layout to + // PRESENT_SRC_KHR at the end of renderpass + vk::AttachmentDescription( {}, + swapChainData.colorFormat, + vk::SampleCountFlagBits::e1, + vk::AttachmentLoadOp::eClear, + vk::AttachmentStoreOp::eStore, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + vk::ImageLayout::ePresentSrcKHR ), + // Second attachment is input attachment. Once cleared it should have width*height yellow pixels. 
+ // Doing a subpassLoad in the fragment shader should give the shader the color at the fragments x,y location from + // the input attachment + vk::AttachmentDescription( {}, + swapChainData.colorFormat, + vk::SampleCountFlagBits::e1, + vk::AttachmentLoadOp::eLoad, + vk::AttachmentStoreOp::eDontCare, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eShaderReadOnlyOptimal, + vk::ImageLayout::eShaderReadOnlyOptimal ) + }; + vk::AttachmentReference colorReference( 0, vk::ImageLayout::eColorAttachmentOptimal ); + vk::AttachmentReference inputReference( 1, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::SubpassDescription subPass( {}, vk::PipelineBindPoint::eGraphics, inputReference, colorReference ); + vk::RenderPassCreateInfo renderPassCreateInfo( {}, attachments, subPass ); + std::unique_ptr renderPass = + vk::raii::su::make_unique( *device, renderPassCreateInfo ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, inputAttachmentView, surfaceData.extent ); + + vk::DescriptorPoolSize poolSize( vk::DescriptorType::eInputAttachment, 1 ); + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSize ); + std::unique_ptr descriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, **descriptorSetLayout ); + std::unique_ptr descriptorSet = vk::raii::su::make_unique( + std::move( vk::raii::DescriptorSets( *device, descriptorSetAllocateInfo ).front() ) ); + 
+ vk::DescriptorImageInfo inputImageInfo( nullptr, **inputAttachmentView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::WriteDescriptorSet writeDescriptorSet( + **descriptorSet, 0, 0, vk::DescriptorType::eInputAttachment, inputImageInfo ); + device->updateDescriptorSets( writeDescriptorSet, nullptr ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = + vk::raii::su::makeUniqueGraphicsPipeline( *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + 0, + {}, + vk::FrontFace::eClockwise, + false, + *pipelineLayout, + *renderPass ); + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + vk::ClearValue clearValue; + clearValue.color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + 
commandBuffer->end(); + + /* VULKAN_KEY_END */ + + vk::raii::su::submitAndWait( *device, *graphicsQueue, *commandBuffer ); + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/InstanceExtensionProperties/CMakeLists.txt b/RAII_Samples/InstanceExtensionProperties/CMakeLists.txt new file mode 100644 index 0000000..d474f2f --- /dev/null +++ b/RAII_Samples/InstanceExtensionProperties/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_InstanceExtensionProperties) + +set(HEADERS +) + +set(SOURCES + InstanceExtensionProperties.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_InstanceExtensionProperties + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_InstanceExtensionProperties PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_InstanceExtensionProperties PRIVATE utils) diff --git a/RAII_Samples/InstanceExtensionProperties/InstanceExtensionProperties.cpp b/RAII_Samples/InstanceExtensionProperties/InstanceExtensionProperties.cpp new file mode 100644 index 0000000..34b56b4 --- /dev/null +++ b/RAII_Samples/InstanceExtensionProperties/InstanceExtensionProperties.cpp @@ -0,0 +1,68 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : InstanceExtensionProperties +// Get global extension properties to know what extension are available to enable at CreateInstance +// time. 
+ +#include "../utils/utils.hpp" + +#include +#include + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + /* VULKAN_KEY_START */ + + std::vector extensionProperties = context->enumerateInstanceExtensionProperties(); + + // sort the extensions alphabetically + + std::sort( extensionProperties.begin(), + extensionProperties.end(), + []( vk::ExtensionProperties const & a, vk::ExtensionProperties const & b ) { + return strcmp( a.extensionName, b.extensionName ) < 0; + } ); + + std::cout << "Instance Extensions:" << std::endl; + for ( auto const & ep : extensionProperties ) + { + std::cout << ep.extensionName << ":" << std::endl; + std::cout << "\tVersion: " << ep.specVersion << std::endl; + std::cout << std::endl; + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/InstanceLayerExtensionProperties/CMakeLists.txt b/RAII_Samples/InstanceLayerExtensionProperties/CMakeLists.txt new file mode 100644 index 0000000..143432d --- /dev/null +++ b/RAII_Samples/InstanceLayerExtensionProperties/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_InstanceLayerExtensionProperties) + +set(HEADERS +) + +set(SOURCES + InstanceLayerExtensionProperties.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_InstanceLayerExtensionProperties + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_InstanceLayerExtensionProperties PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_InstanceLayerExtensionProperties PRIVATE utils) diff --git a/RAII_Samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp b/RAII_Samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp new file mode 100644 index 0000000..062b844 --- /dev/null +++ b/RAII_Samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp @@ -0,0 +1,103 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : InstanceLayerExtensionProperties +// Get list of global layers and their associated extensions, if any. 
+ +#include "../utils/utils.hpp" + +#include +#include +#include + +struct PropertyData +{ + PropertyData( vk::LayerProperties const & layerProperties_, + std::vector const & extensionProperties_ ) + : layerProperties( layerProperties_ ), extensionProperties( extensionProperties_ ) + {} + + vk::LayerProperties layerProperties; + std::vector extensionProperties; +}; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::vector layerProperties = context->enumerateInstanceLayerProperties(); + + /* VULKAN_KEY_START */ + + std::vector propertyData; + propertyData.reserve( layerProperties.size() ); + + for ( auto const & layerProperty : layerProperties ) + { + std::vector extensionProperties = + context->enumerateInstanceExtensionProperties( vk::Optional( layerProperty.layerName ) ); + propertyData.emplace_back( layerProperty, extensionProperties ); + } + + /* VULKAN_KEY_END */ + + std::cout << "Instance Layers:" << std::endl; + if ( propertyData.empty() ) + { + std::cout << "Set the environment variable VK_LAYER_PATH to point to the location of your layers" << std::endl; + } + else + { + for ( auto const & pd : propertyData ) + { + std::cout << pd.layerProperties.layerName << std::endl; + std::cout << "Layer Extensions: "; + if ( pd.extensionProperties.empty() ) + { + std::cout << "None"; + } + else + { + for ( auto it = pd.extensionProperties.begin(); it != pd.extensionProperties.end(); ++it ) + { + if ( it != pd.extensionProperties.begin() ) + { + std::cout << ", "; + } + std::cout << it->extensionName << " Version " << it->specVersion; + } + } + std::cout << std::endl << std::endl; + } + } + std::cout << std::endl; + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/InstanceLayerProperties/CMakeLists.txt b/RAII_Samples/InstanceLayerProperties/CMakeLists.txt new file mode 100644 index 0000000..30249cc --- /dev/null +++ b/RAII_Samples/InstanceLayerProperties/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_InstanceLayerProperties) + +set(HEADERS +) + +set(SOURCES +InstanceLayerProperties.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_InstanceLayerProperties +${HEADERS} +${SOURCES} +) + +set_target_properties(RAII_InstanceLayerProperties PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_InstanceLayerProperties PRIVATE utils) diff --git a/RAII_Samples/InstanceLayerProperties/InstanceLayerProperties.cpp b/RAII_Samples/InstanceLayerProperties/InstanceLayerProperties.cpp new file mode 100644 index 0000000..39ab75a --- /dev/null +++ b/RAII_Samples/InstanceLayerProperties/InstanceLayerProperties.cpp @@ -0,0 +1,67 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : InstanceLayerProperties +// Get global layer properties to know what layers are available to enable at CreateInstance time. + +#include "../utils/utils.hpp" + +#include +#include +#include + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + /* VULKAN_KEY_START */ + + std::vector layerProperties = context->enumerateInstanceLayerProperties(); + + std::cout << "Instance Layers:" << std::endl; + if ( layerProperties.empty() ) + { + std::cout << "Set the environment variable VK_LAYER_PATH to point to the location of your layers" << std::endl; + } + for ( auto const & lp : layerProperties ) + { + std::cout << lp.layerName << ":" << std::endl; + std::cout << "\tVersion: " << lp.implementationVersion << std::endl; + std::cout << "\tAPI Version: (" << ( lp.specVersion >> 22 ) << "." << ( ( lp.specVersion >> 12 ) & 0x03FF ) << "." + << ( lp.specVersion & 0xFFF ) << ")" << std::endl; + std::cout << "\tDescription: " << lp.description << std::endl; + std::cout << std::endl; + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/InstanceVersion/CMakeLists.txt b/RAII_Samples/InstanceVersion/CMakeLists.txt new file mode 100644 index 0000000..129b56e --- /dev/null +++ b/RAII_Samples/InstanceVersion/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_InstanceVersion) + +set(HEADERS +) + +set(SOURCES +InstanceVersion.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_InstanceVersion +${HEADERS} +${SOURCES} +) + +set_target_properties(RAII_InstanceVersion PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_InstanceVersion PRIVATE utils) diff --git a/RAII_Samples/InstanceVersion/InstanceVersion.cpp b/RAII_Samples/InstanceVersion/InstanceVersion.cpp new file mode 100644 index 0000000..2ad63ec --- /dev/null +++ b/RAII_Samples/InstanceVersion/InstanceVersion.cpp @@ -0,0 +1,58 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : InstanceVersion +// Get the version of instance-level functionality supported by the implementation. + +#include "../utils/utils.hpp" + +#include +#include + +std::string decodeAPIVersion( uint32_t apiVersion ) +{ + return std::to_string( VK_VERSION_MAJOR( apiVersion ) ) + "." + std::to_string( VK_VERSION_MINOR( apiVersion ) ) + + "." + std::to_string( VK_VERSION_PATCH( apiVersion ) ); +} + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + /* VULKAN_KEY_START */ + + uint32_t apiVersion = context->enumerateInstanceVersion(); + std::cout << "APIVersion = " << decodeAPIVersion( apiVersion ) << std::endl; + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/MultipleSets/CMakeLists.txt b/RAII_Samples/MultipleSets/CMakeLists.txt new file mode 100644 index 0000000..c1fec5d --- /dev/null +++ b/RAII_Samples/MultipleSets/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_MultipleSets) + +set(HEADERS +) + +set(SOURCES + MultipleSets.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_MultipleSets + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_MultipleSets PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_MultipleSets PRIVATE utils) diff --git a/RAII_Samples/MultipleSets/MultipleSets.cpp b/RAII_Samples/MultipleSets/MultipleSets.cpp new file mode 100644 index 0000000..3941a39 --- /dev/null +++ b/RAII_Samples/MultipleSets/MultipleSets.cpp @@ -0,0 +1,304 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : MultipleSets +// Use multiple descriptor sets to draw a textured cube. 
+ +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +# if ( 9 <= __GNUC__ ) +# pragma GCC diagnostic ignored "-Winit-list-lifetime" +# endif +#else +// unknown compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "MultipleSets"; +static char const * EngineName = "Vulkan.hpp"; + +const std::string vertexShaderText = R"( +#version 400 + +#extension GL_ARB_separate_shader_objects : enable +#extension GL_ARB_shading_language_420pack : enable + +layout (std140, set = 0, binding = 0) uniform buffer +{ + mat4 mvp; +} uniformBuffer; + +layout (set = 1, binding = 0) uniform sampler2D surface; + +layout (location = 0) in vec4 pos; +layout (location = 1) in vec2 inTexCoord; + +layout (location = 0) out vec4 outColor; +layout (location = 1) out vec2 outTexCoord; + +void main() +{ + outColor = texture(surface, vec2(0.0f)); + outTexCoord = inTexCoord; + gl_Position = uniformBuffer.mvp * pos; +} +)"; + +const std::string fragmentShaderText = R"( +#version 400 + +#extension GL_ARB_separate_shader_objects : enable +#extension GL_ARB_shading_language_420pack : enable + +layout (location = 0) in vec4 inColor; +layout (location = 1) in vec2 inTexCoord; + +layout (location = 0) out vec4 outColor; + +void main() +{ + outColor = inColor; + + // create a border to see the cube more easily + if ((inTexCoord.x < 0.01f) || (0.99f < inTexCoord.x) || (inTexCoord.y < 0.01f) || (0.99f < inTexCoord.y)) + { + outColor *= vec4(0.1f, 0.1f, 0.1f, 1.0f); + } +} +)"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + 
std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), 
vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + /* VULKAN_KEY_START */ + + // Create first layout to contain uniform buffer data + vk::DescriptorSetLayoutBinding uniformBinding( + 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ); + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( {}, uniformBinding ); + std::unique_ptr uniformLayout = + vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + + // Create second layout containing combined sampler/image data + vk::DescriptorSetLayoutBinding sampler2DBinding( + 0, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eVertex ); + descriptorSetLayoutCreateInfo.pBindings = &sampler2DBinding; + std::unique_ptr samplerLayout = + 
vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + + // Create pipeline layout with multiple descriptor sets + std::array descriptorSetLayouts = { **uniformLayout, **samplerLayout }; + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, descriptorSetLayouts ); + std::unique_ptr pipelineLayout = + vk::raii::su::make_unique( *device, pipelineLayoutCreateInfo ); + + // Create a single pool to contain data for our two descriptor sets + std::array poolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), + vk::DescriptorPoolSize( + vk::DescriptorType::eCombinedImageSampler, 1 ) }; + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 2, poolSizes ); + std::unique_ptr descriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + + // Populate descriptor sets + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, descriptorSetLayouts ); + vk::raii::DescriptorSets descriptorSets( *device, descriptorSetAllocateInfo ); + + // Populate with info about our uniform buffer + vk::DescriptorBufferInfo uniformBufferInfo( **uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo textureImageInfo( + **textureData.sampler, **textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + std::array writeDescriptorSets = { + { vk::WriteDescriptorSet( *descriptorSets[0], 0, 0, vk::DescriptorType::eUniformBuffer, {}, uniformBufferInfo ), + vk::WriteDescriptorSet( + *descriptorSets[1], 0, 0, vk::DescriptorType::eCombinedImageSampler, textureImageInfo ) } + }; + device->updateDescriptorSets( writeDescriptorSets, nullptr ); + + /* VULKAN_KEY_END */ + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + 
*fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { *descriptorSets[0], *descriptorSets[1] }, nullptr ); + + vk::Buffer buffer = **vertexBufferData.buffer; + commandBuffer->bindVertexBuffers( 0, buffer, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( 
vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/OcclusionQuery/CMakeLists.txt b/RAII_Samples/OcclusionQuery/CMakeLists.txt new file mode 100644 index 0000000..490728f --- /dev/null +++ b/RAII_Samples/OcclusionQuery/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_OcclusionQuery) + +set(HEADERS +) + +set(SOURCES + OcclusionQuery.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_OcclusionQuery + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_OcclusionQuery PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_OcclusionQuery PRIVATE utils) diff --git a/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp b/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp new file mode 100644 index 0000000..b1acda4 --- /dev/null +++ b/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp @@ -0,0 +1,270 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : OcclusionQuery +// Use occlusion query to determine if drawing renders any samples. 
+ +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan_raii.hpp" + +#include +#include + +static char const * AppName = "OcclusionQuery"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + 
vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( coloredCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); + + std::unique_ptr descriptorPool = + vk::raii::su::makeUniqueDescriptorPool( *device, { { vk::DescriptorType::eUniformBuffer, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( 
*device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( + *device, *descriptorSet, { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, nullptr } }, {} ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( coloredCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + /* VULKAN_KEY_START */ + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + + // Get the index of the next available swapchain image: + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + /* Allocate a uniform buffer that will take query results. 
*/ + vk::BufferCreateInfo bufferCreateInfo( + {}, 4 * sizeof( uint64_t ), vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eTransferDst ); + std::unique_ptr queryResultBuffer = + vk::raii::su::make_unique( *device, bufferCreateInfo ); + + vk::MemoryRequirements memoryRequirements = queryResultBuffer->getMemoryRequirements(); + uint32_t memoryTypeIndex = + vk::su::findMemoryType( physicalDevice->getMemoryProperties(), + memoryRequirements.memoryTypeBits, + vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); + std::unique_ptr queryResultMemory = + vk::raii::su::make_unique( *device, memoryAllocateInfo ); + + queryResultBuffer->bindMemory( **queryResultMemory, 0 ); + + vk::QueryPoolCreateInfo queryPoolCreateInfo( {}, vk::QueryType::eOcclusion, 2, {} ); + std::unique_ptr queryPool = + vk::raii::su::make_unique( *device, queryPoolCreateInfo ); + + commandBuffer->begin( {} ); + commandBuffer->resetQueryPool( **queryPool, 0, 2 ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + commandBuffer->beginRenderPass( + vk::RenderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::SubpassContents::eInline ); + + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, {} ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 
), surfaceData.extent ) ); + + commandBuffer->beginQuery( **queryPool, 0, vk::QueryControlFlags() ); + commandBuffer->endQuery( **queryPool, 0 ); + + commandBuffer->beginQuery( **queryPool, 1, vk::QueryControlFlags() ); + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->endQuery( **queryPool, 1 ); + + commandBuffer->copyQueryPoolResults( **queryPool, + 0, + 2, + **queryResultBuffer, + 0, + sizeof( uint64_t ), + vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + graphicsQueue->waitIdle(); + + std::vector poolResults; + std::tie( result, poolResults ) = queryPool->getResults( + 0, 2, 2 * sizeof( uint64_t ), sizeof( uint64_t ), vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eNotReady: + std::cout << "vk::Device::getQueryPoolResults returned vk::Result::eNotReady !\n"; + break; + default: assert( false ); // an unexpected result is returned ! 
+ } + + std::cout << "vkGetQueryPoolResults data\n"; + std::cout << "samples_passed[0] = " << poolResults[0] << "\n"; + std::cout << "samples_passed[1] = " << poolResults[1] << "\n"; + + /* Read back query result from buffer */ + uint64_t * samplesPassedPtr = + static_cast( queryResultMemory->mapMemory( 0, memoryRequirements.size, vk::MemoryMapFlags() ) ); + + std::cout << "vkCmdCopyQueryPoolResults data\n"; + std::cout << "samples_passed[0] = " << samplesPassedPtr[0] << "\n"; + std::cout << "samples_passed[1] = " << samplesPassedPtr[1] << "\n"; + + queryResultMemory->unmapMemory(); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PhysicalDeviceExtensions/CMakeLists.txt b/RAII_Samples/PhysicalDeviceExtensions/CMakeLists.txt new file mode 100644 index 0000000..e5f6226 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceExtensions/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PhysicalDeviceExtensions) + +set(HEADERS +) + +set(SOURCES + PhysicalDeviceExtensions.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PhysicalDeviceExtensions + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PhysicalDeviceExtensions PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PhysicalDeviceExtensions PRIVATE utils) diff --git a/RAII_Samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp b/RAII_Samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp new file mode 100644 index 0000000..a64b6d0 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp @@ -0,0 +1,80 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : DeviceExtensionProperties +// Get extension properties per physical device. 
+ +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include + +static char const * AppName = "DeviceExtensionProperties"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = vk::raii::su::makeUniqueInstance( *context, AppName, EngineName ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + /* VULKAN_KEY_START */ + + for ( size_t i = 0; i < physicalDevices.size(); i++ ) + { + std::vector extensionProperties = + physicalDevices[i].enumerateDeviceExtensionProperties(); + std::cout << "PhysicalDevice " << i << " : " << extensionProperties.size() << " extensions:\n"; + + // sort the extensions alphabetically + std::sort( extensionProperties.begin(), + extensionProperties.end(), + []( vk::ExtensionProperties const & a, vk::ExtensionProperties const & b ) { + return strcmp( a.extensionName, b.extensionName ) < 0; + } ); + for ( auto const & ep : extensionProperties ) + { + std::cout << "\t" << ep.extensionName << ":" << std::endl; + std::cout << "\t\tVersion: " << ep.specVersion << std::endl; + std::cout << std::endl; + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PhysicalDeviceFeatures/CMakeLists.txt b/RAII_Samples/PhysicalDeviceFeatures/CMakeLists.txt new file mode 100644 index 0000000..71c047a --- /dev/null +++ b/RAII_Samples/PhysicalDeviceFeatures/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PhysicalDeviceFeatures) + +set(HEADERS +) + +set(SOURCES + PhysicalDeviceFeatures.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PhysicalDeviceFeatures + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PhysicalDeviceFeatures PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PhysicalDeviceFeatures PRIVATE utils) diff --git a/RAII_Samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp b/RAII_Samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp new file mode 100644 index 0000000..4238c16 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp @@ -0,0 +1,746 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// VulkanHpp Samples : PhysicalDeviceFeatures
+// Get the fine-grained features of the physical devices that can be supported by an implementation.
+
+// ignore warning 4503: decorated name length exceeded, name was truncated
+#if defined( _MSC_VER )
+#  pragma warning( disable : 4503 )
+#elif defined( __GNUC__ )
+// don't know how to switch off that warning here
+#else
+// unknown compiler... just ignore the warnings for yourselves ;)
+#endif
+
+#include "../utils/utils.hpp"
+#include "vulkan/vulkan.hpp"
+
+#include <iostream>
+
+static char const * AppName    = "PhysicalDeviceFeatures";
+static char const * EngineName = "Vulkan.hpp";
+
+int main( int /*argc*/, char ** /*argv*/ )
+{
+  try
+  {
+    std::unique_ptr<vk::raii::Context> context = vk::raii::su::make_unique<vk::raii::Context>();
+    std::unique_ptr<vk::raii::Instance> instance =
+      vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, {}, VK_API_VERSION_1_1 );
+#if !defined( NDEBUG )
+    std::unique_ptr<vk::raii::DebugUtilsMessengerEXT> debugUtilsMessenger =
+      vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance );
+#endif
+
+    // enumerate the physicalDevices
+    vk::raii::PhysicalDevices physicalDevices( *instance );
+
+    /* VULKAN_KEY_START */
+
+    std::cout << std::boolalpha;
+    for ( size_t i = 0; i < physicalDevices.size(); i++ )
+    {
+      // some features are only valid, if a corresponding extension is available!
+ std::vector extensionProperties = + physicalDevices[i].enumerateDeviceExtensionProperties(); + + std::cout << "PhysicalDevice " << i << " :\n"; + auto features2 = physicalDevices[i] + .getFeatures2(); + vk::PhysicalDeviceFeatures const & features = features2.get().features; + std::cout << "\tFeatures:\n"; + std::cout << "\t\talphaToOne : " << !!features.alphaToOne << "\n"; + std::cout << "\t\tdepthBiasClamp : " << !!features.depthBiasClamp << "\n"; + std::cout << "\t\tdepthBounds : " << !!features.depthBounds << "\n"; + std::cout << "\t\tdepthClamp : " << !!features.depthClamp << "\n"; + std::cout << "\t\tdrawIndirectFirstInstance : " << !!features.drawIndirectFirstInstance << "\n"; + std::cout << "\t\tdualSrcBlend : " << !!features.dualSrcBlend << "\n"; + std::cout << "\t\tfillModeNonSolid : " << !!features.fillModeNonSolid << "\n"; + std::cout << "\t\tfragmentStoresAndAtomics : " << !!features.fragmentStoresAndAtomics << "\n"; + std::cout << "\t\tfullDrawIndexUint32 : " << !!features.fullDrawIndexUint32 << "\n"; + std::cout << "\t\tgeometryShader : " << !!features.geometryShader << "\n"; + std::cout << "\t\timageCubeArray : " << !!features.imageCubeArray << "\n"; + std::cout << "\t\tindependentBlend : " << !!features.independentBlend << "\n"; + std::cout << "\t\tinheritedQueries : " << !!features.inheritedQueries << "\n"; + std::cout << "\t\tlargePoints : " << !!features.largePoints << "\n"; + std::cout << "\t\tlogicOp : " << !!features.logicOp << "\n"; + std::cout << "\t\tmultiDrawIndirect : " << !!features.multiDrawIndirect << "\n"; + std::cout << "\t\tmultiViewport : " << !!features.multiViewport << "\n"; + std::cout << "\t\tocclusionQueryPrecise : " << !!features.occlusionQueryPrecise << "\n"; + std::cout << "\t\tpipelineStatisticsQuery : " << !!features.pipelineStatisticsQuery << "\n"; + std::cout << "\t\trobustBufferAccess : " << !!features.robustBufferAccess << "\n"; + std::cout << "\t\tsamplerAnisotropy : " << !!features.samplerAnisotropy << "\n"; + 
std::cout << "\t\tsampleRateShading : " << !!features.sampleRateShading << "\n"; + std::cout << "\t\tshaderClipDistance : " << !!features.shaderClipDistance << "\n"; + std::cout << "\t\tshaderCullDistance : " << !!features.shaderCullDistance << "\n"; + std::cout << "\t\tshaderFloat64 : " << !!features.shaderFloat64 << "\n"; + std::cout << "\t\tshaderImageGatherExtended : " << !!features.shaderImageGatherExtended << "\n"; + std::cout << "\t\tshaderInt16 : " << !!features.shaderInt16 << "\n"; + std::cout << "\t\tshaderInt64 : " << !!features.shaderInt64 << "\n"; + std::cout << "\t\tshaderResourceMinLod : " << !!features.shaderResourceMinLod << "\n"; + std::cout << "\t\tshaderResourceResidency : " << !!features.shaderResourceResidency << "\n"; + std::cout << "\t\tshaderSampledImageArrayDynamicIndexing : " << !!features.shaderSampledImageArrayDynamicIndexing + << "\n"; + std::cout << "\t\tshaderStorageBufferArrayDynamicIndexing : " + << !!features.shaderStorageBufferArrayDynamicIndexing << "\n"; + std::cout << "\t\tshaderStorageImageArrayDynamicIndexing : " << !!features.shaderStorageImageArrayDynamicIndexing + << "\n"; + std::cout << "\t\tshaderStorageImageExtendedFormats : " << !!features.shaderStorageImageExtendedFormats + << "\n"; + std::cout << "\t\tshaderStorageImageMultisample : " << !!features.shaderStorageImageMultisample << "\n"; + std::cout << "\t\tshaderStorageImageReadWithoutFormat : " << !!features.shaderStorageImageReadWithoutFormat + << "\n"; + std::cout << "\t\tshaderStorageImageWriteWithoutFormat : " << !!features.shaderStorageImageWriteWithoutFormat + << "\n"; + std::cout << "\t\tshaderTessellationAndGeometryPointSize : " << !!features.shaderTessellationAndGeometryPointSize + << "\n"; + std::cout << "\t\tshaderUniformBufferArrayDynamicIndexing : " + << !!features.shaderUniformBufferArrayDynamicIndexing << "\n"; + std::cout << "\t\tsparseBinding : " << !!features.sparseBinding << "\n"; + std::cout << "\t\tsparseResidency16Samples : " << 
!!features.sparseResidency16Samples << "\n"; + std::cout << "\t\tsparseResidency2Samples : " << !!features.sparseResidency2Samples << "\n"; + std::cout << "\t\tsparseResidency4Samples : " << !!features.sparseResidency4Samples << "\n"; + std::cout << "\t\tsparseResidency8Samples : " << !!features.sparseResidency8Samples << "\n"; + std::cout << "\t\tsparseResidencyAliased : " << !!features.sparseResidencyAliased << "\n"; + std::cout << "\t\tsparseResidencyBuffer : " << !!features.sparseResidencyBuffer << "\n"; + std::cout << "\t\tsparseResidencyImage2D : " << !!features.sparseResidencyImage2D << "\n"; + std::cout << "\t\tsparseResidencyImage3D : " << !!features.sparseResidencyImage3D << "\n"; + std::cout << "\t\ttessellationShader : " << !!features.tessellationShader << "\n"; + std::cout << "\t\ttextureCompressionASTC_LDR : " << !!features.textureCompressionASTC_LDR << "\n"; + std::cout << "\t\ttextureCompressionBC : " << !!features.textureCompressionBC << "\n"; + std::cout << "\t\ttextureCompressionETC2 : " << !!features.textureCompressionETC2 << "\n"; + std::cout << "\t\tvariableMultisampleRate : " << !!features.variableMultisampleRate << "\n"; + std::cout << "\t\tvertexPipelineStoresAndAtomics : " << !!features.vertexPipelineStoresAndAtomics + << "\n"; + std::cout << "\t\twideLines : " << !!features.wideLines << "\n"; + std::cout << "\n"; + + vk::PhysicalDevice16BitStorageFeatures const & sixteenBitStorageFeatures = + features2.get(); + std::cout << "\t16BitStorageFeatures:\n"; + std::cout << "\t\tstorageBuffer16BitAccess : " << !!sixteenBitStorageFeatures.storageBuffer16BitAccess + << "\n"; + std::cout << "\t\tstorageInputOutput16 : " << !!sixteenBitStorageFeatures.storageInputOutput16 + << "\n"; + std::cout << "\t\tstoragePushConstant16 : " << !!sixteenBitStorageFeatures.storagePushConstant16 + << "\n"; + std::cout << "\t\tuniformAndStorageBuffer16BitAccess : " + << !!sixteenBitStorageFeatures.uniformAndStorageBuffer16BitAccess << "\n"; + std::cout << "\n"; + + 
if ( vk::su::contains( extensionProperties, "VK_KHR_8bit_storage" ) ) + { + vk::PhysicalDevice8BitStorageFeaturesKHR const & eightBitStorageFeatures = + features2.get(); + std::cout << "\t8BitStorageFeatures:\n"; + std::cout << "\t\tstorageBuffer8BitAccess : " << !!eightBitStorageFeatures.storageBuffer8BitAccess + << "\n"; + std::cout << "\t\tstoragePushConstant8 : " << !!eightBitStorageFeatures.storagePushConstant8 + << "\n"; + std::cout << "\t\tuniformAndStorageBuffer8BitAccess : " + << !!eightBitStorageFeatures.uniformAndStorageBuffer8BitAccess << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_astc_decode_mode" ) ) + { + vk::PhysicalDeviceASTCDecodeFeaturesEXT const & astcDecodeFeatures = + features2.get(); + std::cout << "\tASTCDecodeFeature:\n"; + std::cout << "\t\tdecodeModeSharedExponent : " << !!astcDecodeFeatures.decodeModeSharedExponent << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_blend_operation_advanced" ) ) + { + vk::PhysicalDeviceBlendOperationAdvancedFeaturesEXT const & blendOperationAdvancedFeatures = + features2.get(); + std::cout << "\tBlendOperationAdvancedFeatures:\n"; + std::cout << "\t\tadvancedBlendCoherentOperations : " + << !!blendOperationAdvancedFeatures.advancedBlendCoherentOperations << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_buffer_device_address" ) ) + { + vk::PhysicalDeviceBufferDeviceAddressFeaturesEXT const & bufferDeviceAddressFeatures = + features2.get(); + std::cout << "\tBufferDeviceAddressFeatures:\n"; + std::cout << "\t\tbufferDeviceAddress : " << !!bufferDeviceAddressFeatures.bufferDeviceAddress + << "\n"; + std::cout << "\t\tbufferDeviceAddressCaptureReplay : " + << !!bufferDeviceAddressFeatures.bufferDeviceAddressCaptureReplay << "\n"; + std::cout << "\t\tbufferDeviceAddressMultiDevice : " + << !!bufferDeviceAddressFeatures.bufferDeviceAddressMultiDevice << "\n"; + std::cout << "\n"; + } + + 
if ( vk::su::contains( extensionProperties, "VK_AMD_device_coherent_memory" ) ) + { + vk::PhysicalDeviceCoherentMemoryFeaturesAMD const & coherentMemoryFeatures = + features2.get(); + std::cout << "\tCoherentMemoryFeatures:\n"; + std::cout << "\t\tdeviceCoherentMemory : " << !!coherentMemoryFeatures.deviceCoherentMemory << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_compute_shader_derivatives" ) ) + { + vk::PhysicalDeviceComputeShaderDerivativesFeaturesNV const & computeShaderDerivativesFeatures = + features2.get(); + std::cout << "\tComputeShaderDerivativeFeatures:\n"; + std::cout << "\t\tcomputeDerivativeGroupLinear : " + << !!computeShaderDerivativesFeatures.computeDerivativeGroupLinear << "\n"; + std::cout << "\t\tcomputeDerivativeGroupQuads : " + << !!computeShaderDerivativesFeatures.computeDerivativeGroupQuads << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_conditional_rendering" ) ) + { + vk::PhysicalDeviceConditionalRenderingFeaturesEXT const & conditionalRenderingFeatures = + features2.get(); + std::cout << "\tConditionalRenderingFeatures:\n"; + std::cout << "\t\tconditionalRendering : " << !!conditionalRenderingFeatures.conditionalRendering + << "\n"; + std::cout << "\t\tinheritedConditionalRendering : " + << !!conditionalRenderingFeatures.inheritedConditionalRendering << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_cooperative_matrix" ) ) + { + vk::PhysicalDeviceCooperativeMatrixFeaturesNV const & cooperativeMatrixFeatures = + features2.get(); + std::cout << "\tCooperativeMatrixFeatures:\n"; + std::cout << "\t\tcooperativeMatrix : " << !!cooperativeMatrixFeatures.cooperativeMatrix + << "\n"; + std::cout << "\t\tcooperativeMatrixRobustBufferAccess : " + << !!cooperativeMatrixFeatures.cooperativeMatrixRobustBufferAccess << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_corner_sampled_image" ) ) 
+ { + vk::PhysicalDeviceCornerSampledImageFeaturesNV const & cornerSampledImageFeatures = + features2.get(); + std::cout << "\tCornerSampledImageFeatures:\n"; + std::cout << "\t\tcornerSampledImage : " << !!cornerSampledImageFeatures.cornerSampledImage << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_coverage_reduction_mode" ) ) + { + vk::PhysicalDeviceCoverageReductionModeFeaturesNV const & coverageReductionModeFeatures = + features2.get(); + std::cout << "\tCoverageReductionModeFeatures:\n"; + std::cout << "\t\tcoverageReductionMode : " << !!coverageReductionModeFeatures.coverageReductionMode << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_dedicated_allocation_image_aliasing" ) ) + { + vk::PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const & dedicatedAllocationImageAliasingFeatures = + features2.get(); + std::cout << "\tDedicatedAllocationAliasingFeatures:\n"; + std::cout << "\t\tdedicatedAllocationImageAliasing : " + << !!dedicatedAllocationImageAliasingFeatures.dedicatedAllocationImageAliasing << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_depth_clip_enable" ) ) + { + vk::PhysicalDeviceDepthClipEnableFeaturesEXT const & depthClipEnabledFeatures = + features2.get(); + std::cout << "\tDepthClipEnabledFeatures:\n"; + std::cout << "\t\tdepthClipEnable : " << !!depthClipEnabledFeatures.depthClipEnable << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_descriptor_indexing" ) ) + { + vk::PhysicalDeviceDescriptorIndexingFeaturesEXT const & descriptorIndexingFeatures = + features2.get(); + std::cout << "\tDescriptorIndexingFeatures:\n"; + std::cout << "\t\tdescriptorBindingPartiallyBound : " + << !!descriptorIndexingFeatures.descriptorBindingPartiallyBound << "\n"; + std::cout << "\t\tdescriptorBindingSampledImageUpdateAfterBind : " + << 
!!descriptorIndexingFeatures.descriptorBindingSampledImageUpdateAfterBind << "\n"; + std::cout << "\t\tdescriptorBindingStorageBufferUpdateAfterBind : " + << !!descriptorIndexingFeatures.descriptorBindingStorageBufferUpdateAfterBind << "\n"; + std::cout << "\t\tdescriptorBindingStorageImageUpdateAfterBind : " + << !!descriptorIndexingFeatures.descriptorBindingStorageImageUpdateAfterBind << "\n"; + std::cout << "\t\tdescriptorBindingStorageTexelBufferUpdateAfterBind : " + << !!descriptorIndexingFeatures.descriptorBindingStorageTexelBufferUpdateAfterBind << "\n"; + std::cout << "\t\tdescriptorBindingUniformBufferUpdateAfterBind : " + << !!descriptorIndexingFeatures.descriptorBindingUniformBufferUpdateAfterBind << "\n"; + std::cout << "\t\tdescriptorBindingUniformTexelBufferUpdateAfterBind : " + << !!descriptorIndexingFeatures.descriptorBindingUniformTexelBufferUpdateAfterBind << "\n"; + std::cout << "\t\tdescriptorBindingUpdateUnusedWhilePending : " + << !!descriptorIndexingFeatures.descriptorBindingUpdateUnusedWhilePending << "\n"; + std::cout << "\t\tdescriptorBindingVariableDescriptorCount : " + << !!descriptorIndexingFeatures.descriptorBindingVariableDescriptorCount << "\n"; + std::cout << "\t\truntimeDescriptorArray : " + << !!descriptorIndexingFeatures.runtimeDescriptorArray << "\n"; + std::cout << "\t\tshaderInputAttachmentArrayDynamicIndexing : " + << !!descriptorIndexingFeatures.shaderInputAttachmentArrayDynamicIndexing << "\n"; + std::cout << "\t\tshaderInputAttachmentArrayNonUniformIndexing : " + << !!descriptorIndexingFeatures.shaderInputAttachmentArrayNonUniformIndexing << "\n"; + std::cout << "\t\tshaderSampledImageArrayNonUniformIndexing : " + << !!descriptorIndexingFeatures.shaderSampledImageArrayNonUniformIndexing << "\n"; + std::cout << "\t\tshaderStorageBufferArrayNonUniformIndexing : " + << !!descriptorIndexingFeatures.shaderStorageBufferArrayNonUniformIndexing << "\n"; + std::cout << "\t\tshaderStorageImageArrayNonUniformIndexing : " + << 
!!descriptorIndexingFeatures.shaderStorageImageArrayNonUniformIndexing << "\n"; + std::cout << "\t\tshaderStorageTexelBufferArrayDynamicIndexing : " + << !!descriptorIndexingFeatures.shaderStorageTexelBufferArrayDynamicIndexing << "\n"; + std::cout << "\t\tshaderStorageTexelBufferArrayNonUniformIndexing : " + << !!descriptorIndexingFeatures.shaderStorageTexelBufferArrayNonUniformIndexing << "\n"; + std::cout << "\t\tshaderUniformBufferArrayNonUniformIndexing : " + << !!descriptorIndexingFeatures.shaderUniformBufferArrayNonUniformIndexing << "\n"; + std::cout << "\t\tshaderUniformTexelBufferArrayDynamicIndexing : " + << !!descriptorIndexingFeatures.shaderUniformTexelBufferArrayDynamicIndexing << "\n"; + std::cout << "\t\tshaderUniformTexelBufferArrayNonUniformIndexing : " + << !!descriptorIndexingFeatures.shaderUniformTexelBufferArrayNonUniformIndexing << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_scissor_exclusive" ) ) + { + vk::PhysicalDeviceExclusiveScissorFeaturesNV const & exclusiveScissorFeatures = + features2.get(); + std::cout << "\tExclusiveScissorFeatures:\n"; + std::cout << "\t\texclusiveScissor : " << !!exclusiveScissorFeatures.exclusiveScissor << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_fragment_density_map" ) ) + { + vk::PhysicalDeviceFragmentDensityMapFeaturesEXT const & fragmentDensityMapFeatures = + features2.get(); + std::cout << "\tFragmentDensityMapFeatures:\n"; + std::cout << "\t\tfragmentDensityMap : " << !!fragmentDensityMapFeatures.fragmentDensityMap + << "\n"; + std::cout << "\t\tfragmentDensityMapDynamic : " + << !!fragmentDensityMapFeatures.fragmentDensityMapDynamic << "\n"; + std::cout << "\t\tfragmentDensityMapNonSubsampledImages : " + << !!fragmentDensityMapFeatures.fragmentDensityMapNonSubsampledImages << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_fragment_shader_barycentric" ) ) + { + 
vk::PhysicalDeviceFragmentShaderBarycentricFeaturesNV const & fragmentShaderBarycentricFeatures = + features2.get(); + std::cout << "\tFragmentShaderBarycentricFeatures:\n"; + std::cout << "\t\tfragmentShaderBarycentric : " << !!fragmentShaderBarycentricFeatures.fragmentShaderBarycentric + << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_fragment_shader_interlock" ) ) + { + vk::PhysicalDeviceFragmentShaderInterlockFeaturesEXT const & fragmentShaderInterlockFeatures = + features2.get(); + std::cout << "\tFragmentShaderInterlockFeatures:\n"; + std::cout << "\t\tfragmentShaderPixelInterlock : " + << !!fragmentShaderInterlockFeatures.fragmentShaderPixelInterlock << "\n"; + std::cout << "\t\tfragmentShaderSampleInterlock : " + << !!fragmentShaderInterlockFeatures.fragmentShaderSampleInterlock << "\n"; + std::cout << "\t\tfragmentShaderShadingRateInterlock : " + << !!fragmentShaderInterlockFeatures.fragmentShaderShadingRateInterlock << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_host_query_reset" ) ) + { + vk::PhysicalDeviceHostQueryResetFeaturesEXT const & hostQueryResetFeatures = + features2.get(); + std::cout << "\tHostQueryResetFeatures:\n"; + std::cout << "\t\thostQueryReset : " << !!hostQueryResetFeatures.hostQueryReset << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_imageless_framebuffer" ) ) + { + vk::PhysicalDeviceImagelessFramebufferFeaturesKHR const & imagelessFramebufferFeatures = + features2.get(); + std::cout << "\tImagelessFramebufferFeatures:\n"; + std::cout << "\t\timagelessFramebuffer : " << !!imagelessFramebufferFeatures.imagelessFramebuffer << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_index_type_uint8" ) ) + { + vk::PhysicalDeviceIndexTypeUint8FeaturesEXT const & indexTypeUint8Features = + features2.get(); + std::cout << "\tIndexTypeUint8Features:\n"; + std::cout << 
"\t\tindexTypeUint8 : " << !!indexTypeUint8Features.indexTypeUint8 << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_inline_uniform_block" ) ) + { + vk::PhysicalDeviceInlineUniformBlockFeaturesEXT const & inlineUniformBlockFeatures = + features2.get(); + std::cout << "\tInlineUniformBlockFeatures:\n"; + std::cout << "\t\tdescriptorBindingInlineUniformBlockUpdateAfterBind : " + << !!inlineUniformBlockFeatures.descriptorBindingInlineUniformBlockUpdateAfterBind << "\n"; + std::cout << "\t\tinlineUniformBlock : " + << !!inlineUniformBlockFeatures.inlineUniformBlock << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_line_rasterization" ) ) + { + vk::PhysicalDeviceLineRasterizationFeaturesEXT const & lineRasterizationFeatures = + features2.get(); + std::cout << "\tLineRasterizationFeatures:\n"; + std::cout << "\t\tbresenhamLines : " << !!lineRasterizationFeatures.bresenhamLines << "\n"; + std::cout << "\t\trectangularLines : " << !!lineRasterizationFeatures.rectangularLines << "\n"; + std::cout << "\t\tsmoothLines : " << !!lineRasterizationFeatures.smoothLines << "\n"; + std::cout << "\t\tstippledBresenhamLines : " << !!lineRasterizationFeatures.stippledBresenhamLines << "\n"; + std::cout << "\t\tstippledRectangularLines : " << !!lineRasterizationFeatures.stippledRectangularLines << "\n"; + std::cout << "\t\tstippledSmoothLines : " << !!lineRasterizationFeatures.stippledSmoothLines << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_memory_priority" ) ) + { + vk::PhysicalDeviceMemoryPriorityFeaturesEXT const & memoryPriorityFeatures = + features2.get(); + std::cout << "\tMemoryPriorityFeatures:\n"; + std::cout << "\t\tmemoryPriority : " << !!memoryPriorityFeatures.memoryPriority << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_mesh_shader" ) ) + { + vk::PhysicalDeviceMeshShaderFeaturesNV const & meshShaderFeatures = 
+ features2.get(); + std::cout << "\tMeshShaderFeatures:\n"; + std::cout << "\t\tmeshShader : " << !!meshShaderFeatures.meshShader << "\n"; + std::cout << "\t\ttaskShader : " << !!meshShaderFeatures.taskShader << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceMultiviewFeatures const & multiviewFeatures = + features2.get(); + std::cout << "\tMultiviewFeatures:\n"; + std::cout << "\t\tmultiview : " << !!multiviewFeatures.multiview << "\n"; + std::cout << "\t\tmultiviewGeometryShader : " << !!multiviewFeatures.multiviewGeometryShader << "\n"; + std::cout << "\t\tmultiviewTessellationShader : " << !!multiviewFeatures.multiviewTessellationShader << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_KHR_pipeline_executable_properties" ) ) + { + vk::PhysicalDevicePipelineExecutablePropertiesFeaturesKHR const & pipelineExecutablePropertiesFeatures = + features2.get(); + std::cout << "\tPipelineExectuablePropertiesFeatures:\n"; + std::cout << "\t\tpipelineExecutableInfo : " << !!pipelineExecutablePropertiesFeatures.pipelineExecutableInfo + << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceProtectedMemoryFeatures const & protectedMemoryFeatures = + features2.get(); + std::cout << "\tProtectedMemoryFeatures:\n"; + std::cout << "\t\tprotectedMemory : " << !!protectedMemoryFeatures.protectedMemory << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_NV_representative_fragment_test" ) ) + { + vk::PhysicalDeviceRepresentativeFragmentTestFeaturesNV const & representativeFragmentTestFeatures = + features2.get(); + std::cout << "\tRepresentativeFragmentTestFeatures:\n"; + std::cout << "\t\trepresentativeFragmentTest : " + << !!representativeFragmentTestFeatures.representativeFragmentTest << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceSamplerYcbcrConversionFeatures const & samplerYcbcrConversionFeatures = + features2.get(); + std::cout << "\tSamplerYcbcrConversionFeatures:\n"; + std::cout << 
"\t\tsamplerYcbcrConversion : " << !!samplerYcbcrConversionFeatures.samplerYcbcrConversion << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_EXT_scalar_block_layout" ) ) + { + vk::PhysicalDeviceScalarBlockLayoutFeaturesEXT const & scalarBlockLayoutFeatures = + features2.get(); + std::cout << "\tScalarBlockLayoutFeatures:\n"; + std::cout << "\t\tscalarBlockLayout : " << !!scalarBlockLayoutFeatures.scalarBlockLayout << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_shader_atomic_int64" ) ) + { + vk::PhysicalDeviceShaderAtomicInt64FeaturesKHR const & shaderAtomicInt64Features = + features2.get(); + std::cout << "\tShaderAtomicInt64Features:\n"; + std::cout << "\t\tshaderBufferInt64Atomics : " << !!shaderAtomicInt64Features.shaderBufferInt64Atomics << "\n"; + std::cout << "\t\tshaderSharedInt64Atomics : " << !!shaderAtomicInt64Features.shaderSharedInt64Atomics << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_shader_demote_to_helper_invocation" ) ) + { + vk::PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const & shaderDemoteToHelperInvocationFeatures = + features2.get(); + std::cout << "\tShaderDemoteToHelperInvocationFeatures:\n"; + std::cout << "\t\tshaderDemoteToHelperInvocation : " + << !!shaderDemoteToHelperInvocationFeatures.shaderDemoteToHelperInvocation << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceShaderDrawParametersFeatures const & shaderDrawParametersFeature = + features2.get(); + std::cout << "\tShaderDrawParametersFeature:\n"; + std::cout << "\t\tshaderDrawParameters : " << !!shaderDrawParametersFeature.shaderDrawParameters << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_KHR_shader_float16_int8" ) ) + { + vk::PhysicalDeviceShaderFloat16Int8FeaturesKHR const & shaderFloat16Int8Features = + features2.get(); + std::cout << "\tShaderFloat16Int8Features:\n"; + std::cout << "\t\tshaderFloat16 : " << 
!!shaderFloat16Int8Features.shaderFloat16 << "\n"; + std::cout << "\t\tshaderInt8 : " << !!shaderFloat16Int8Features.shaderInt8 << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_shader_image_footprint" ) ) + { + vk::PhysicalDeviceShaderImageFootprintFeaturesNV const & shaderImageFootprintFeatures = + features2.get(); + std::cout << "\tShaderImageFootprintFeatures:\n"; + std::cout << "\t\timageFootprint : " << !!shaderImageFootprintFeatures.imageFootprint << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_INTEL_shader_integer_functions2" ) ) + { + vk::PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const & shaderIntegerFunctions2Features = + features2.get(); + std::cout << "\tShaderIntegerFunctions2Features:\n"; + std::cout << "\t\tshaderIntegerFunctions2 : " << !!shaderIntegerFunctions2Features.shaderIntegerFunctions2 + << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_shader_sm_builtins" ) ) + { + vk::PhysicalDeviceShaderSMBuiltinsFeaturesNV const & shaderSMBuiltinsFeatures = + features2.get(); + std::cout << "\tShaderSMBuiltinsFeatures:\n"; + std::cout << "\t\tshaderSMBuiltins : " << !!shaderSMBuiltinsFeatures.shaderSMBuiltins << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_shader_subgroup_extended_types" ) ) + { + vk::PhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const & shaderSubgroupExtendedTypesFeatures = + features2.get(); + std::cout << "\tShaderSubgroupExtendedTypeFeatures:\n"; + std::cout << "\t\tshaderSubgroupExtendedTypes : " + << !!shaderSubgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_shading_rate_image" ) ) + { + vk::PhysicalDeviceShadingRateImageFeaturesNV const & shadingRateImageFeatures = + features2.get(); + std::cout << "\tShadingRateImageFeatures:\n"; + std::cout << 
"\t\tshadingRateCoarseSampleOrder : " << !!shadingRateImageFeatures.shadingRateCoarseSampleOrder + << "\n"; + std::cout << "\t\tshadingRateImage : " << !!shadingRateImageFeatures.shadingRateImage << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_subgroup_size_control" ) ) + { + vk::PhysicalDeviceSubgroupSizeControlFeaturesEXT const & subgroupSizeControlFeatures = + features2.get(); + std::cout << "\tSubgroupSizeControlFeatures:\n"; + std::cout << "\t\tcomputeFullSubgroups : " << !!subgroupSizeControlFeatures.computeFullSubgroups << "\n"; + std::cout << "\t\tsubgroupSizeControl : " << !!subgroupSizeControlFeatures.subgroupSizeControl << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_texel_buffer_alignment" ) ) + { + vk::PhysicalDeviceTexelBufferAlignmentFeaturesEXT const & texelBufferAlignmentFeatures = + features2.get(); + std::cout << "\tTexelBufferAlignmentFeatures:\n"; + std::cout << "\t\ttexelBufferAlignment : " << !!texelBufferAlignmentFeatures.texelBufferAlignment << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_texture_compression_astc_hdr" ) ) + { + vk::PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const & textureCompressionASTCHDRFeatures = + features2.get(); + std::cout << "\tTextureCompressionASTCHHRFeatures:\n"; + std::cout << "\t\ttextureCompressionASTC_HDR : " + << !!textureCompressionASTCHDRFeatures.textureCompressionASTC_HDR << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_timeline_semaphore" ) ) + { + vk::PhysicalDeviceTimelineSemaphoreFeaturesKHR const & timelineSemaphoreFeatures = + features2.get(); + std::cout << "\tTimelineSemaphoreFeatures:\n"; + std::cout << "\t\ttimelineSemaphore :" << !!timelineSemaphoreFeatures.timelineSemaphore << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_transform_feedback" ) ) + { + 
vk::PhysicalDeviceTransformFeedbackFeaturesEXT const & transformFeedbackFeatures = + features2.get(); + std::cout << "\tTransformFeedbackFeatures:\n"; + std::cout << "\t\tgeometryStreams : " << !!transformFeedbackFeatures.geometryStreams << "\n"; + std::cout << "\t\ttransformFeedback : " << !!transformFeedbackFeatures.transformFeedback << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_uniform_buffer_standard_layout" ) ) + { + vk::PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const & uniformBufferStandardLayoutFeatures = + features2.get(); + std::cout << "\tUniformBufferStandardLayoutFeatures:\n"; + std::cout << "\t\tuniformBufferStandardLayout : " + << !!uniformBufferStandardLayoutFeatures.uniformBufferStandardLayout << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_variable_pointers" ) ) + { + vk::PhysicalDeviceVariablePointersFeatures const & variablePointersFeatures = + features2.get(); + std::cout << "\tVariablePointersFeatures:\n"; + std::cout << "\t\tvariablePointers : " << !!variablePointersFeatures.variablePointers << "\n"; + std::cout << "\t\tvariablePointersStorageBuffer : " << !!variablePointersFeatures.variablePointersStorageBuffer + << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_vertex_attribute_divisor" ) ) + { + vk::PhysicalDeviceVertexAttributeDivisorFeaturesEXT const & vertexAttributeDivisorFeatures = + features2.get(); + std::cout << "\tVertexAttributeDivisorFeature:\n"; + std::cout << "\t\tvertexAttributeInstanceRateDivisor : " + << !!vertexAttributeDivisorFeatures.vertexAttributeInstanceRateDivisor << "\n"; + std::cout << "\t\tvertexAttributeInstanceRateZeroDivisor : " + << !!vertexAttributeDivisorFeatures.vertexAttributeInstanceRateZeroDivisor << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_vulkan_memory_model" ) ) + { + vk::PhysicalDeviceVulkanMemoryModelFeaturesKHR const & 
vulkanMemoryModelFeatures = + features2.get(); + std::cout << "\tVulkanMemoryModelFeatures:\n"; + std::cout << "\t\tvulkanMemoryModel : " + << !!vulkanMemoryModelFeatures.vulkanMemoryModel << "\n"; + std::cout << "\t\tvulkanMemoryModelAvailabilityVisibilityChains : " + << !!vulkanMemoryModelFeatures.vulkanMemoryModelAvailabilityVisibilityChains << "\n"; + std::cout << "\t\tvulkanMemoryModelDeviceScope : " + << !!vulkanMemoryModelFeatures.vulkanMemoryModelDeviceScope << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_sampler_ycbcr_conversion" ) ) + { + vk::PhysicalDeviceYcbcrImageArraysFeaturesEXT const & ycbcrImageArraysFeatures = + features2.get(); + std::cout << "\tYcbcrImageArraysFeatures:\n"; + std::cout << "\t\tycbcrImageArrays : " << !!ycbcrImageArraysFeatures.ycbcrImageArrays << "\n"; + std::cout << "\n"; + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PhysicalDeviceGroups/CMakeLists.txt b/RAII_Samples/PhysicalDeviceGroups/CMakeLists.txt new file mode 100644 index 0000000..0528539 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceGroups/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PhysicalDeviceGroups) + +set(HEADERS +) + +set(SOURCES + PhysicalDeviceGroups.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PhysicalDeviceGroups + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PhysicalDeviceGroups PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PhysicalDeviceGroups PRIVATE utils) diff --git a/RAII_Samples/PhysicalDeviceGroups/PhysicalDeviceGroups.cpp b/RAII_Samples/PhysicalDeviceGroups/PhysicalDeviceGroups.cpp new file mode 100644 index 0000000..8ff159e --- /dev/null +++ b/RAII_Samples/PhysicalDeviceGroups/PhysicalDeviceGroups.cpp @@ -0,0 +1,107 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : PhysicalDeviceGroups +// Get the PhysicalDeviceGroups. 
+ +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include + +static char const * AppName = "PhysicalDeviceGroups"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + /* VULKAN_KEY_START */ + + std::vector groupProperties = instance->enumeratePhysicalDeviceGroups(); + + std::cout << std::boolalpha; + for ( size_t i = 0; i < groupProperties.size(); i++ ) + { + std::cout << "Group Properties " << i << " :\n"; + std::cout << "\t" + << "physicalDeviceCount = " << groupProperties[i].physicalDeviceCount << "\n"; + std::cout << "\t" + << "physicalDevices:\n"; + for ( size_t j = 0; j < groupProperties[i].physicalDeviceCount; j++ ) + { + std::unique_ptr physicalDevice = vk::raii::su::make_unique( + static_cast( groupProperties[i].physicalDevices[j] ), instance->getDispatcher() ); + std::cout << "\t\t" << j << " : " << physicalDevice->getProperties().deviceName << "\n"; + } + std::cout << "\t" + << "subsetAllocation = " << !!groupProperties[i].subsetAllocation << "\n"; + std::cout << "\n"; + + if ( 1 < groupProperties[i].physicalDeviceCount ) + { + std::unique_ptr physicalDevice = vk::raii::su::make_unique( + static_cast( groupProperties[i].physicalDevices[0] ), instance->getDispatcher() ); + + // get the QueueFamilyProperties of the first PhysicalDevice + std::vector queueFamilyProperties = physicalDevice->getQueueFamilyProperties(); + + // get the first index into queueFamiliyProperties which supports graphics + auto propertyIterator = std::find_if( + queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { + return qfp.queueFlags & 
vk::QueueFlagBits::eGraphics; + } ); + size_t graphicsQueueFamilyIndex = std::distance( queueFamilyProperties.begin(), propertyIterator ); + assert( graphicsQueueFamilyIndex < queueFamilyProperties.size() ); + + // create a Device + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( + {}, static_cast( graphicsQueueFamilyIndex ), 1, &queuePriority ); + vk::StructureChain deviceCreateInfoChain( + { {}, deviceQueueCreateInfo }, + { groupProperties[i].physicalDeviceCount, groupProperties[i].physicalDevices } ); + + std::unique_ptr device = + vk::raii::su::make_unique( *physicalDevice, deviceCreateInfoChain.get() ); + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PhysicalDeviceMemoryProperties/CMakeLists.txt b/RAII_Samples/PhysicalDeviceMemoryProperties/CMakeLists.txt new file mode 100644 index 0000000..c3a29c8 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceMemoryProperties/CMakeLists.txt @@ -0,0 +1,37 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

# NOTE(review): CMake's if() has no "!" operator — the original if(!MSVC) compared the
# literal string "!MSVC", which is always a false constant, so this sample was never
# configured on any platform. The apparent intent (skip this sample when building with
# MSVC) is expressed with the NOT keyword instead.
if(NOT MSVC)
  cmake_minimum_required(VERSION 3.2)

  project(RAII_PhysicalDeviceMemoryProperties)

  set(HEADERS
  )

  set(SOURCES
    PhysicalDeviceMemoryProperties.cpp
  )

  source_group(headers FILES ${HEADERS})
  source_group(sources FILES ${SOURCES})

  add_executable(RAII_PhysicalDeviceMemoryProperties
    ${HEADERS}
    ${SOURCES}
  )

  # folder name aligned with the sibling RAII sample CMakeLists ("RAII_Samples", was "RAIISamples")
  set_target_properties(RAII_PhysicalDeviceMemoryProperties PROPERTIES FOLDER "RAII_Samples")
  target_link_libraries(RAII_PhysicalDeviceMemoryProperties PRIVATE utils)
endif()
diff --git a/RAII_Samples/PhysicalDeviceMemoryProperties/PhysicalDeviceMemoryProperties.cpp b/RAII_Samples/PhysicalDeviceMemoryProperties/PhysicalDeviceMemoryProperties.cpp
new file mode 100644
index 0000000..e712d2a
--- /dev/null
+++ b/RAII_Samples/PhysicalDeviceMemoryProperties/PhysicalDeviceMemoryProperties.cpp
@@ -0,0 +1,119 @@
// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// VulkanHpp Samples : PhysicalDeviceMemoryProperties
// Get memory properties per physical device.
+ +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "PhysicalDeviceMemoryProperties"; +static char const * EngineName = "Vulkan.hpp"; + +std::string formatSize( vk::DeviceSize size ) +{ + std::ostringstream oss; + if ( size < 1024 ) + { + oss << size << " B"; + } + else if ( size < 1024 * 1024 ) + { + oss << size / 1024.f << " KB"; + } + else if ( size < 1024 * 1024 * 1024 ) + { + oss << size / ( 1024.0f * 1024.0f ) << " MB"; + } + else + { + oss << size / ( 1024.0f * 1024.0f * 1024.0f ) << " GB"; + } + return oss.str(); +} + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr> contextHandle = std::make_unique>(); + std::unique_ptr> instanceHandle = + vk::su::makeUniqueInstanceHandle( *contextHandle, AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); +#if !defined( NDEBUG ) + std::unique_ptr> debugUtilsMessenger = + vk::su::makeUniqueDebugUtilsMessengerEXTHandle( *instanceHandle ); +#endif + + // enumerate the physicalDevices + std::unique_ptr> physicalDeviceHandles = + std::make_unique>( *instanceHandle ); + + /* VULKAN_KEY_START */ + + for ( size_t i = 0; i < physicalDeviceHandles->size(); i++ ) + { + // some properties are only valid, if a corresponding extension is available! 
+ std::vector extensionProperties = + (*physicalDeviceHandles)[i].enumerateDeviceExtensionProperties(); + bool containsMemoryBudget = vk::su::contains( extensionProperties, "VK_EXT_memory_budget" ); + + std::cout << "PhysicalDevice " << i << " :\n"; + auto memoryProperties2 = + (*physicalDeviceHandles)[i] + .getMemoryProperties2(); + vk::PhysicalDeviceMemoryProperties const & memoryProperties = + memoryProperties2.get().memoryProperties; + vk::PhysicalDeviceMemoryBudgetPropertiesEXT const & memoryBudgetProperties = + memoryProperties2.get(); + std::cout << "memoryHeapCount: " << memoryProperties.memoryHeapCount << "\n"; + for ( uint32_t j = 0; j < memoryProperties.memoryHeapCount; j++ ) + { + std::cout << " " << j << ": size = " << formatSize( memoryProperties.memoryHeaps[j].size ) + << ", flags = " << vk::to_string( memoryProperties.memoryHeaps[j].flags ) << "\n"; + if ( containsMemoryBudget ) + { + std::cout << " heapBudget = " << formatSize( memoryBudgetProperties.heapBudget[j] ) + << ", heapUsage = " << formatSize( memoryBudgetProperties.heapUsage[j] ) << "\n"; + } + } + std::cout << "memoryTypeCount: " << memoryProperties.memoryTypeCount << "\n"; + for ( uint32_t j = 0; j < memoryProperties.memoryTypeCount; j++ ) + { + std::cout << " " << j << ": heapIndex = " << memoryProperties.memoryTypes[j].heapIndex + << ", flags = " << vk::to_string( memoryProperties.memoryTypes[j].propertyFlags ) << "\n"; + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PhysicalDeviceProperties/CMakeLists.txt b/RAII_Samples/PhysicalDeviceProperties/CMakeLists.txt new file mode 100644 index 0000000..670d047 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceProperties/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PhysicalDeviceProperties) + +set(HEADERS +) + +set(SOURCES + PhysicalDeviceProperties.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PhysicalDeviceProperties + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PhysicalDeviceProperties PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PhysicalDeviceProperties PRIVATE utils) diff --git a/RAII_Samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp b/RAII_Samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp new file mode 100644 index 0000000..d52ebfd --- /dev/null +++ b/RAII_Samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp @@ -0,0 +1,1272 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : PhysicalDeviceProperties +// Get properties per physical device. + +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include +#include +#include + +static char const * AppName = "PhysicalDeviceProperties"; +static char const * EngineName = "Vulkan.hpp"; + +std::string decodeAPIVersion( uint32_t apiVersion ) +{ + return std::to_string( VK_VERSION_MAJOR( apiVersion ) ) + "." + std::to_string( VK_VERSION_MINOR( apiVersion ) ) + + "." + std::to_string( VK_VERSION_PATCH( apiVersion ) ); +} + +std::string decodeDriverVersion( uint32_t driverVersion, uint32_t vendorID ) +{ + switch ( vendorID ) + { + case 4318: + return std::to_string( ( driverVersion >> 22 ) & 0x3FF ) + "." + + std::to_string( ( driverVersion >> 14 ) & 0xFF ) + "." + std::to_string( ( driverVersion >> 6 ) & 0xFF ) + + "." + std::to_string( driverVersion & 0x3F ); + case 0x8086: + return std::to_string( ( driverVersion >> 14 ) & 0x3FFFF ) + "." 
+ std::to_string( ( driverVersion & 0x3FFF ) ); + default: return decodeAPIVersion( driverVersion ); + } +} + +std::string decodeVendorID( uint32_t vendorID ) +{ + // below 0x10000 are the PCI vendor IDs (https://pcisig.com/membership/member-companies) + if ( vendorID < 0x10000 ) + { + switch ( vendorID ) + { + case 0x1022: return "Advanced Micro Devices"; + case 0x10DE: return "NVidia Corporation"; + case 0x8086: return "Intel Corporation"; + default: return std::to_string( vendorID ); + } + } + else + { + // above 0x10000 should be vkVendorIDs + return vk::to_string( vk::VendorId( vendorID ) ); + } +} + +namespace vk +{ + namespace su + { + struct LUID + { + public: + LUID( uint8_t const data[VK_LUID_SIZE] ) + { + memcpy( m_data, data, VK_LUID_SIZE * sizeof( uint8_t ) ); + } + + uint8_t m_data[VK_LUID_SIZE]; + }; + + std::ostream & operator<<( std::ostream & os, LUID const & uuid ) + { + os << std::setfill( '0' ) << std::hex; + for ( int j = 0; j < VK_LUID_SIZE; ++j ) + { + os << std::setw( 2 ) << static_cast( uuid.m_data[j] ); + if ( j == 3 || j == 5 ) + { + std::cout << '-'; + } + } + os << std::setfill( ' ' ) << std::dec; + return os; + } + } // namespace su +} // namespace vk + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + /* VULKAN_KEY_START */ + + std::cout << std::boolalpha; + for ( size_t i = 0; i < physicalDevices.size(); i++ ) + { + // some properties are only valid, if a corresponding extension is available! 
+ std::vector extensionProperties = + physicalDevices[i].enumerateDeviceExtensionProperties(); + + std::cout << "PhysicalDevice " << i << "\n"; + auto properties2 = physicalDevices[i] + .getProperties2(); + vk::PhysicalDeviceProperties const & properties = properties2.get().properties; + std::cout << "\t" + << "Properties:\n"; + std::cout << "\t\t" + << "apiVersion = " << decodeAPIVersion( properties.apiVersion ) << "\n"; + std::cout << "\t\t" + << "driverVersion = " << decodeDriverVersion( properties.driverVersion, properties.vendorID ) + << "\n"; + std::cout << "\t\t" + << "vendorID = " << decodeVendorID( properties.vendorID ) << "\n"; + std::cout << "\t\t" + << "deviceID = " << properties.deviceID << "\n"; + std::cout << "\t\t" + << "deviceType = " << vk::to_string( properties.deviceType ) << "\n"; + std::cout << "\t\t" + << "deviceName = " << properties.deviceName << "\n"; + std::cout << "\t\t" + << "pipelineCacheUUID = " << vk::su::UUID( properties.pipelineCacheUUID ) << "\n"; + std::cout << "\t\t" + << "limits:\n"; + std::cout << "\t\t\t" + << "bufferImageGranularity = " << properties.limits.bufferImageGranularity + << "\n"; + std::cout << "\t\t\t" + << "discreteQueuePriorities = " << properties.limits.discreteQueuePriorities + << "\n"; + std::cout << "\t\t\t" + << "framebufferColorSampleCounts = " + << vk::to_string( properties.limits.framebufferColorSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "framebufferDepthSampleCounts = " + << vk::to_string( properties.limits.framebufferDepthSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "framebufferNoAttachmentsSampleCounts = " + << vk::to_string( properties.limits.framebufferNoAttachmentsSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "framebufferStencilSampleCounts = " + << vk::to_string( properties.limits.framebufferStencilSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "lineWidthGranularity = " << properties.limits.lineWidthGranularity + << "\n"; + std::cout << "\t\t\t" + << "lineWidthRange 
= " + << "[" << properties.limits.lineWidthRange[0] << ", " << properties.limits.lineWidthRange[1] << "]" + << "\n"; + std::cout << "\t\t\t" + << "maxBoundDescriptorSets = " << properties.limits.maxBoundDescriptorSets + << "\n"; + std::cout << "\t\t\t" + << "maxClipDistances = " << properties.limits.maxClipDistances << "\n"; + std::cout << "\t\t\t" + << "maxColorAttachments = " << properties.limits.maxColorAttachments + << "\n"; + std::cout << "\t\t\t" + << "maxCombinedClipAndCullDistances = " + << properties.limits.maxCombinedClipAndCullDistances << "\n"; + std::cout << "\t\t\t" + << "maxComputeSharedMemorySize = " << properties.limits.maxComputeSharedMemorySize + << "\n"; + std::cout << "\t\t\t" + << "maxComputeWorkGroupCount = " + << "[" << properties.limits.maxComputeWorkGroupCount[0] << ", " + << properties.limits.maxComputeWorkGroupCount[1] << ", " + << properties.limits.maxComputeWorkGroupCount[2] << "]" + << "\n"; + std::cout << "\t\t\t" + << "maxComputeWorkGroupInvocations = " + << properties.limits.maxComputeWorkGroupInvocations << "\n"; + std::cout << "\t\t\t" + << "maxComputeWorkGroupSize = " + << "[" << properties.limits.maxComputeWorkGroupSize[0] << ", " + << properties.limits.maxComputeWorkGroupSize[1] << ", " << properties.limits.maxComputeWorkGroupSize[2] + << "]" + << "\n"; + std::cout << "\t\t\t" + << "maxCullDistances = " << properties.limits.maxCullDistances << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetInputAttachments = " + << properties.limits.maxDescriptorSetInputAttachments << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetSampledImages = " + << properties.limits.maxDescriptorSetSampledImages << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetSamplers = " << properties.limits.maxDescriptorSetSamplers + << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetStorageBuffers = " + << properties.limits.maxDescriptorSetStorageBuffers << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetStorageBuffersDynamic = " + << 
properties.limits.maxDescriptorSetStorageBuffersDynamic << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetStorageImages = " + << properties.limits.maxDescriptorSetStorageImages << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetUniformBuffers = " + << properties.limits.maxDescriptorSetUniformBuffers << "\n"; + std::cout << "\t\t\t" + << "maxDescriptorSetUniformBuffersDynamic = " + << properties.limits.maxDescriptorSetUniformBuffersDynamic << "\n"; + std::cout << "\t\t\t" + << "maxDrawIndexedIndexValue = " << properties.limits.maxDrawIndexedIndexValue + << "\n"; + std::cout << "\t\t\t" + << "maxDrawIndirectCount = " << properties.limits.maxDrawIndirectCount + << "\n"; + std::cout << "\t\t\t" + << "maxFragmentCombinedOutputResources = " + << properties.limits.maxFragmentCombinedOutputResources << "\n"; + std::cout << "\t\t\t" + << "maxFragmentDualSrcAttachments = " + << properties.limits.maxFragmentDualSrcAttachments << "\n"; + std::cout << "\t\t\t" + << "maxFragmentInputComponents = " << properties.limits.maxFragmentInputComponents + << "\n"; + std::cout << "\t\t\t" + << "maxFragmentOutputAttachments = " + << properties.limits.maxFragmentOutputAttachments << "\n"; + std::cout << "\t\t\t" + << "maxFramebufferHeight = " << properties.limits.maxFramebufferHeight + << "\n"; + std::cout << "\t\t\t" + << "maxFramebufferLayers = " << properties.limits.maxFramebufferLayers + << "\n"; + std::cout << "\t\t\t" + << "maxFramebufferWidth = " << properties.limits.maxFramebufferWidth + << "\n"; + std::cout << "\t\t\t" + << "maxGeometryInputComponents = " << properties.limits.maxGeometryInputComponents + << "\n"; + std::cout << "\t\t\t" + << "maxGeometryOutputComponents = " << properties.limits.maxGeometryOutputComponents + << "\n"; + std::cout << "\t\t\t" + << "maxGeometryOutputVertices = " << properties.limits.maxGeometryOutputVertices + << "\n"; + std::cout << "\t\t\t" + << "maxGeometryShaderInvocations = " + << properties.limits.maxGeometryShaderInvocations << "\n"; + 
std::cout << "\t\t\t" + << "maxGeometryTotalOutputComponents = " + << properties.limits.maxGeometryTotalOutputComponents << "\n"; + std::cout << "\t\t\t" + << "maxImageArrayLayers = " << properties.limits.maxImageArrayLayers + << "\n"; + std::cout << "\t\t\t" + << "maxImageDimension1D = " << properties.limits.maxImageDimension1D + << "\n"; + std::cout << "\t\t\t" + << "maxImageDimension2D = " << properties.limits.maxImageDimension2D + << "\n"; + std::cout << "\t\t\t" + << "maxImageDimension3D = " << properties.limits.maxImageDimension3D + << "\n"; + std::cout << "\t\t\t" + << "maxImageDimensionCube = " << properties.limits.maxImageDimensionCube + << "\n"; + std::cout << "\t\t\t" + << "maxInterpolationOffset = " << properties.limits.maxInterpolationOffset + << "\n"; + std::cout << "\t\t\t" + << "maxMemoryAllocationCount = " << properties.limits.maxMemoryAllocationCount + << "\n"; + std::cout << "\t\t\t" + << "maxPerStageDescriptorInputAttachments = " + << properties.limits.maxPerStageDescriptorInputAttachments << "\n"; + std::cout << "\t\t\t" + << "maxPerStageDescriptorSampledImages = " + << properties.limits.maxPerStageDescriptorSampledImages << "\n"; + std::cout << "\t\t\t" + << "maxPerStageDescriptorSamplers = " + << properties.limits.maxPerStageDescriptorSamplers << "\n"; + std::cout << "\t\t\t" + << "maxPerStageDescriptorStorageBuffers = " + << properties.limits.maxPerStageDescriptorStorageBuffers << "\n"; + std::cout << "\t\t\t" + << "maxPerStageDescriptorStorageImages = " + << properties.limits.maxPerStageDescriptorStorageImages << "\n"; + std::cout << "\t\t\t" + << "maxPerStageDescriptorUniformBuffers = " + << properties.limits.maxPerStageDescriptorUniformBuffers << "\n"; + std::cout << "\t\t\t" + << "maxPerStageResources = " << properties.limits.maxPerStageResources + << "\n"; + std::cout << "\t\t\t" + << "maxPushConstantsSize = " << properties.limits.maxPushConstantsSize + << "\n"; + std::cout << "\t\t\t" + << "maxSampleMaskWords = " << 
properties.limits.maxSampleMaskWords << "\n"; + std::cout << "\t\t\t" + << "maxSamplerAllocationCount = " << properties.limits.maxSamplerAllocationCount + << "\n"; + std::cout << "\t\t\t" + << "maxSamplerAnisotropy = " << properties.limits.maxSamplerAnisotropy + << "\n"; + std::cout << "\t\t\t" + << "maxSamplerLodBias = " << properties.limits.maxSamplerLodBias << "\n"; + std::cout << "\t\t\t" + << "maxStorageBufferRange = " << properties.limits.maxStorageBufferRange + << "\n"; + std::cout << "\t\t\t" + << "maxTessellationControlPerPatchOutputComponents = " + << properties.limits.maxTessellationControlPerPatchOutputComponents << "\n"; + std::cout << "\t\t\t" + << "maxTessellationControlPerVertexInputComponents = " + << properties.limits.maxTessellationControlPerVertexInputComponents << "\n"; + std::cout << "\t\t\t" + << "maxTessellationControlPerVertexOutputComponents = " + << properties.limits.maxTessellationControlPerVertexOutputComponents << "\n"; + std::cout << "\t\t\t" + << "maxTessellationControlTotalOutputComponents = " + << properties.limits.maxTessellationControlTotalOutputComponents << "\n"; + std::cout << "\t\t\t" + << "maxTessellationEvaluationInputComponents = " + << properties.limits.maxTessellationEvaluationInputComponents << "\n"; + std::cout << "\t\t\t" + << "maxTessellationEvaluationOutputComponents = " + << properties.limits.maxTessellationEvaluationOutputComponents << "\n"; + std::cout << "\t\t\t" + << "maxTessellationGenerationLevel = " + << properties.limits.maxTessellationGenerationLevel << "\n"; + std::cout << "\t\t\t" + << "maxTessellationPatchSize = " << properties.limits.maxTessellationPatchSize + << "\n"; + std::cout << "\t\t\t" + << "maxTexelBufferElements = " << properties.limits.maxTexelBufferElements + << "\n"; + std::cout << "\t\t\t" + << "maxTexelGatherOffset = " << properties.limits.maxTexelGatherOffset + << "\n"; + std::cout << "\t\t\t" + << "maxTexelOffset = " << properties.limits.maxTexelOffset << "\n"; + std::cout << "\t\t\t" + 
<< "maxUniformBufferRange = " << properties.limits.maxUniformBufferRange + << "\n"; + std::cout << "\t\t\t" + << "maxVertexInputAttributeOffset = " + << properties.limits.maxVertexInputAttributeOffset << "\n"; + std::cout << "\t\t\t" + << "maxVertexInputAttributes = " << properties.limits.maxVertexInputAttributes + << "\n"; + std::cout << "\t\t\t" + << "maxVertexInputBindings = " << properties.limits.maxVertexInputBindings + << "\n"; + std::cout << "\t\t\t" + << "maxVertexInputBindingStride = " << properties.limits.maxVertexInputBindingStride + << "\n"; + std::cout << "\t\t\t" + << "maxVertexOutputComponents = " << properties.limits.maxVertexOutputComponents + << "\n"; + std::cout << "\t\t\t" + << "maxViewportDimensions = " + << "[" << properties.limits.maxViewportDimensions[0] << ", " + << properties.limits.maxViewportDimensions[1] << "]" + << "\n"; + std::cout << "\t\t\t" + << "maxViewports = " << properties.limits.maxViewports << "\n"; + std::cout << "\t\t\t" + << "minInterpolationOffset = " << properties.limits.minInterpolationOffset + << "\n"; + std::cout << "\t\t\t" + << "minMemoryMapAlignment = " << properties.limits.minMemoryMapAlignment + << "\n"; + std::cout << "\t\t\t" + << "minStorageBufferOffsetAlignment = " + << properties.limits.minStorageBufferOffsetAlignment << "\n"; + std::cout << "\t\t\t" + << "minTexelBufferOffsetAlignment = " + << properties.limits.minTexelBufferOffsetAlignment << "\n"; + std::cout << "\t\t\t" + << "minTexelGatherOffset = " << properties.limits.minTexelGatherOffset + << "\n"; + std::cout << "\t\t\t" + << "minTexelOffset = " << properties.limits.minTexelOffset << "\n"; + std::cout << "\t\t\t" + << "minUniformBufferOffsetAlignment = " + << properties.limits.minUniformBufferOffsetAlignment << "\n"; + std::cout << "\t\t\t" + << "mipmapPrecisionBits = " << properties.limits.mipmapPrecisionBits + << "\n"; + std::cout << "\t\t\t" + << "nonCoherentAtomSize = " << properties.limits.nonCoherentAtomSize + << "\n"; + std::cout << "\t\t\t" 
+ << "optimalBufferCopyOffsetAlignment = " + << properties.limits.optimalBufferCopyOffsetAlignment << "\n"; + std::cout << "\t\t\t" + << "optimalBufferCopyRowPitchAlignment = " + << properties.limits.optimalBufferCopyRowPitchAlignment << "\n"; + std::cout << "\t\t\t" + << "pointSizeGranularity = " << properties.limits.pointSizeGranularity + << "\n"; + std::cout << "\t\t\t" + << "pointSizeRange = " + << "[" << properties.limits.pointSizeRange[0] << ", " << properties.limits.pointSizeRange[1] << "]" + << "\n"; + std::cout << "\t\t\t" + << "sampledImageColorSampleCounts = " + << vk::to_string( properties.limits.sampledImageColorSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "sampledImageDepthSampleCounts = " + << vk::to_string( properties.limits.sampledImageDepthSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "sampledImageIntegerSampleCounts = " + << vk::to_string( properties.limits.sampledImageIntegerSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "sampledImageStencilSampleCounts = " + << vk::to_string( properties.limits.sampledImageStencilSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "sparseAddressSpaceSize = " << properties.limits.sparseAddressSpaceSize + << "\n"; + std::cout << "\t\t\t" + << "standardSampleLocations = " << !!properties.limits.standardSampleLocations + << "\n"; + std::cout << "\t\t\t" + << "storageImageSampleCounts = " + << vk::to_string( properties.limits.storageImageSampleCounts ) << "\n"; + std::cout << "\t\t\t" + << "strictLines = " << !!properties.limits.strictLines << "\n"; + std::cout << "\t\t\t" + << "subPixelInterpolationOffsetBits = " + << properties.limits.subPixelInterpolationOffsetBits << "\n"; + std::cout << "\t\t\t" + << "subPixelPrecisionBits = " << properties.limits.subPixelPrecisionBits + << "\n"; + std::cout << "\t\t\t" + << "subTexelPrecisionBits = " << properties.limits.subTexelPrecisionBits + << "\n"; + std::cout << "\t\t\t" + << "timestampComputeAndGraphics = " + << 
!!properties.limits.timestampComputeAndGraphics << "\n"; + std::cout << "\t\t\t" + << "timestampPeriod = " << properties.limits.timestampPeriod << "\n"; + std::cout << "\t\t\t" + << "viewportBoundsRange = " + << "[" << properties.limits.viewportBoundsRange[0] << ", " << properties.limits.viewportBoundsRange[1] + << "]" + << "\n"; + std::cout << "\t\t\t" + << "viewportSubPixelBits = " << properties.limits.viewportSubPixelBits + << "\n"; + std::cout << "\t\t" + << "sparseProperties:\n"; + std::cout << "\t\t\t" + << "residencyAlignedMipSize = " + << !!properties.sparseProperties.residencyAlignedMipSize << "\n"; + std::cout << "\t\t\t" + << "residencyNonResidentStrict = " + << !!properties.sparseProperties.residencyNonResidentStrict << "\n"; + std::cout << "\t\t\t" + << "residencyStandard2DBlockShape = " + << !!properties.sparseProperties.residencyStandard2DBlockShape << "\n"; + std::cout << "\t\t\t" + << "residencyStandard2DMultisampleBlockShape = " + << !!properties.sparseProperties.residencyStandard2DMultisampleBlockShape << "\n"; + std::cout << "\t\t\t" + << "residencyStandard3DBlockShape = " + << !!properties.sparseProperties.residencyStandard3DBlockShape << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_EXT_blend_operation_advanced" ) ) + { + vk::PhysicalDeviceBlendOperationAdvancedPropertiesEXT const & blendOperationAdvancedProperties = + properties2.get(); + std::cout << "\t" + << "BlendOperationAdvancedProperties:\n"; + std::cout << "\t\t" + << "advancedBlendAllOperations = " + << !!blendOperationAdvancedProperties.advancedBlendAllOperations << "\n"; + std::cout << "\t\t" + << "advancedBlendCorrelatedOverlap = " + << !!blendOperationAdvancedProperties.advancedBlendCorrelatedOverlap << "\n"; + std::cout << "\t\t" + << "advancedBlendIndependentBlend = " + << !!blendOperationAdvancedProperties.advancedBlendIndependentBlend << "\n"; + std::cout << "\t\t" + << "advancedBlendMaxColorAttachments = " + << 
blendOperationAdvancedProperties.advancedBlendMaxColorAttachments << "\n"; + std::cout << "\t\t" + << "advancedBlendNonPremultipliedDstColor = " + << !!blendOperationAdvancedProperties.advancedBlendNonPremultipliedDstColor << "\n"; + std::cout << "\t\t" + << "advancedBlendNonPremultipliedSrcColor = " + << !!blendOperationAdvancedProperties.advancedBlendNonPremultipliedSrcColor << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_conservative_rasterization" ) ) + { + vk::PhysicalDeviceConservativeRasterizationPropertiesEXT const & conservativeRasterizationProperties = + properties2.get(); + std::cout << "\t" + << "ConservativeRasterizationProperties:\n"; + std::cout << "\t\t" + << "conservativePointAndLineRasterization = " + << !!conservativeRasterizationProperties.conservativePointAndLineRasterization << "\n"; + std::cout << "\t\t" + << "conservativeRasterizationPostDepthCoverage = " + << !!conservativeRasterizationProperties.conservativeRasterizationPostDepthCoverage << "\n"; + std::cout << "\t\t" + << "degenerateLinesRasterized = " + << !!conservativeRasterizationProperties.degenerateLinesRasterized << "\n"; + std::cout << "\t\t" + << "degenerateTrianglesRasterized = " + << !!conservativeRasterizationProperties.degenerateTrianglesRasterized << "\n"; + std::cout << "\t\t" + << "extraPrimitiveOverestimationSizeGranularity = " + << conservativeRasterizationProperties.extraPrimitiveOverestimationSizeGranularity << "\n"; + std::cout << "\t\t" + << "fullyCoveredFragmentShaderInputVariable = " + << !!conservativeRasterizationProperties.fullyCoveredFragmentShaderInputVariable << "\n"; + std::cout << "\t\t" + << "maxExtraPrimitiveOverestimationSize = " + << conservativeRasterizationProperties.maxExtraPrimitiveOverestimationSize << "\n"; + std::cout << "\t\t" + << "primitiveOverestimationSize = " + << conservativeRasterizationProperties.primitiveOverestimationSize << "\n"; + std::cout << "\t\t" + << "primitiveUnderestimation = " + << 
!!conservativeRasterizationProperties.primitiveUnderestimation << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_cooperative_matrix" ) ) + { + vk::PhysicalDeviceCooperativeMatrixPropertiesNV const & cooperativeMatrixProperties = + properties2.get(); + std::cout << "\t" + << "CooperativeMatrixProperties:\n"; + std::cout << "\t\t" + << "cooperativeMatrixSupportedStages = " + << vk::to_string( cooperativeMatrixProperties.cooperativeMatrixSupportedStages ) << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_depth_stencil_resolve" ) ) + { + vk::PhysicalDeviceDepthStencilResolvePropertiesKHR const & depthStencilResolveProperties = + properties2.get(); + std::cout << "\t" + << "DepthStencilResolveProperties:\n"; + std::cout << "\t\t" + << "independentResolve = " << !!depthStencilResolveProperties.independentResolve << "\n"; + std::cout << "\t\t" + << "independentResolveNone = " << !!depthStencilResolveProperties.independentResolveNone + << "\n"; + std::cout << "\t\t" + << "supportedDepthResolveModes = " + << vk::to_string( depthStencilResolveProperties.supportedDepthResolveModes ) << "\n"; + std::cout << "\t\t" + << "supportedStencilResolveModes = " + << vk::to_string( depthStencilResolveProperties.supportedStencilResolveModes ) << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_descriptor_indexing" ) ) + { + vk::PhysicalDeviceDescriptorIndexingPropertiesEXT const & descriptorIndexingProperties = + properties2.get(); + std::cout << "\t" + << "DescriptorIndexingProperties:\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindInputAttachments = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindInputAttachments << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindSampledImages = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindSampledImages << "\n"; + std::cout << "\t\t" + << 
"maxDescriptorSetUpdateAfterBindSamplers = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindSamplers << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindStorageBuffers = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindStorageBuffers << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindStorageImages = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindStorageImages << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindUniformBuffers = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindUniformBuffers << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = " + << descriptorIndexingProperties.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindInputAttachments = " + << descriptorIndexingProperties.maxPerStageDescriptorUpdateAfterBindInputAttachments << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindSampledImages = " + << descriptorIndexingProperties.maxPerStageDescriptorUpdateAfterBindSampledImages << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindSamplers = " + << descriptorIndexingProperties.maxPerStageDescriptorUpdateAfterBindSamplers << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindStorageBuffers = " + << descriptorIndexingProperties.maxPerStageDescriptorUpdateAfterBindStorageBuffers << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindStorageImages = " + << descriptorIndexingProperties.maxPerStageDescriptorUpdateAfterBindStorageImages << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindUniformBuffers = " + << 
descriptorIndexingProperties.maxPerStageDescriptorUpdateAfterBindUniformBuffers << "\n"; + std::cout << "\t\t" + << "maxPerStageUpdateAfterBindResources = " + << descriptorIndexingProperties.maxPerStageUpdateAfterBindResources << "\n"; + std::cout << "\t\t" + << "maxUpdateAfterBindDescriptorsInAllPools = " + << descriptorIndexingProperties.maxUpdateAfterBindDescriptorsInAllPools << "\n"; + std::cout << "\t\t" + << "quadDivergentImplicitLod = " + << !!descriptorIndexingProperties.quadDivergentImplicitLod << "\n"; + std::cout << "\t\t" + << "robustBufferAccessUpdateAfterBind = " + << !!descriptorIndexingProperties.robustBufferAccessUpdateAfterBind << "\n"; + std::cout << "\t\t" + << "shaderInputAttachmentArrayNonUniformIndexingNative = " + << !!descriptorIndexingProperties.shaderInputAttachmentArrayNonUniformIndexingNative << "\n"; + std::cout << "\t\t" + << "shaderSampledImageArrayNonUniformIndexingNative = " + << !!descriptorIndexingProperties.shaderSampledImageArrayNonUniformIndexingNative << "\n"; + std::cout << "\t\t" + << "shaderStorageBufferArrayNonUniformIndexingNative = " + << !!descriptorIndexingProperties.shaderStorageBufferArrayNonUniformIndexingNative << "\n"; + std::cout << "\t\t" + << "shaderStorageImageArrayNonUniformIndexingNative = " + << !!descriptorIndexingProperties.shaderStorageImageArrayNonUniformIndexingNative << "\n"; + std::cout << "\t\t" + << "shaderUniformBufferArrayNonUniformIndexingNative = " + << !!descriptorIndexingProperties.shaderUniformBufferArrayNonUniformIndexingNative << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_discard_rectangles" ) ) + { + vk::PhysicalDeviceDiscardRectanglePropertiesEXT const & discardRectangleProperties = + properties2.get(); + std::cout << "\t" + << "DiscardRectangleProperties:\n"; + std::cout << "\t\t" + << "maxDiscardRectangles = " << discardRectangleProperties.maxDiscardRectangles << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, 
"VK_KHR_driver_properties" ) ) + { + vk::PhysicalDeviceDriverPropertiesKHR const & driverProperties = + properties2.get(); + std::cout << "\t" + << "DriverProperties:\n"; + std::cout << "\t\t" + << "driverID = " << vk::to_string( driverProperties.driverID ) << "\n"; + std::cout << "\t\t" + << "driverName = " << driverProperties.driverName << "\n"; + std::cout << "\t\t" + << "driverInfo = " << driverProperties.driverInfo << "\n"; + std::cout << "\t\t" + << "conformanceVersion = " << static_cast( driverProperties.conformanceVersion.major ) + << "." << static_cast( driverProperties.conformanceVersion.minor ) << "." + << static_cast( driverProperties.conformanceVersion.subminor ) << "." + << static_cast( driverProperties.conformanceVersion.patch ) << std::dec << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_external_memory_host" ) ) + { + vk::PhysicalDeviceExternalMemoryHostPropertiesEXT const & externalMemoryHostProperties = + properties2.get(); + std::cout << "\t" + << "ExternalMemoryHostProperties:\n"; + std::cout << "\t\t" + << "minImportedHostPointerAlignment = " + << externalMemoryHostProperties.minImportedHostPointerAlignment << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_shader_float_controls" ) ) + { + vk::PhysicalDeviceFloatControlsPropertiesKHR const & floatControlsProperties = + properties2.get(); + std::cout << "\t" + << "FloatControlsProperties:\n"; + std::cout << "\t\t" + << "denormBehaviorIndependence = " + << vk::to_string( floatControlsProperties.denormBehaviorIndependence ) << "\n"; + std::cout << "\t\t" + << "roundingModeIndependence = " + << vk::to_string( floatControlsProperties.roundingModeIndependence ) << "\n"; + std::cout << "\t\t" + << "shaderDenormFlushToZeroFloat16 = " + << !!floatControlsProperties.shaderDenormFlushToZeroFloat16 << "\n"; + std::cout << "\t\t" + << "shaderDenormFlushToZeroFloat32 = " + << 
!!floatControlsProperties.shaderDenormFlushToZeroFloat32 << "\n"; + std::cout << "\t\t" + << "shaderDenormFlushToZeroFloat64 = " + << !!floatControlsProperties.shaderDenormFlushToZeroFloat64 << "\n"; + std::cout << "\t\t" + << "shaderDenormPreserveFloat16 = " << !!floatControlsProperties.shaderDenormPreserveFloat16 + << "\n"; + std::cout << "\t\t" + << "shaderDenormPreserveFloat32 = " << !!floatControlsProperties.shaderDenormPreserveFloat32 + << "\n"; + std::cout << "\t\t" + << "shaderDenormPreserveFloat64 = " << !!floatControlsProperties.shaderDenormPreserveFloat64 + << "\n"; + std::cout << "\t\t" + << "shaderRoundingModeRTEFloat16 = " + << !!floatControlsProperties.shaderRoundingModeRTEFloat16 << "\n"; + std::cout << "\t\t" + << "shaderRoundingModeRTEFloat32 = " + << !!floatControlsProperties.shaderRoundingModeRTEFloat32 << "\n"; + std::cout << "\t\t" + << "shaderRoundingModeRTEFloat64 = " + << !!floatControlsProperties.shaderRoundingModeRTEFloat64 << "\n"; + std::cout << "\t\t" + << "shaderRoundingModeRTZFloat16 = " + << !!floatControlsProperties.shaderRoundingModeRTZFloat16 << "\n"; + std::cout << "\t\t" + << "shaderRoundingModeRTZFloat32 = " + << !!floatControlsProperties.shaderRoundingModeRTZFloat32 << "\n"; + std::cout << "\t\t" + << "shaderRoundingModeRTZFloat64 = " + << !!floatControlsProperties.shaderRoundingModeRTZFloat64 << "\n"; + std::cout << "\t\t" + << "shaderSignedZeroInfNanPreserveFloat16 = " + << !!floatControlsProperties.shaderSignedZeroInfNanPreserveFloat16 << "\n"; + std::cout << "\t\t" + << "shaderSignedZeroInfNanPreserveFloat32 = " + << !!floatControlsProperties.shaderSignedZeroInfNanPreserveFloat32 << "\n"; + std::cout << "\t\t" + << "shaderSignedZeroInfNanPreserveFloat64 = " + << !!floatControlsProperties.shaderSignedZeroInfNanPreserveFloat64 << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_fragment_density_map" ) ) + { + vk::PhysicalDeviceFragmentDensityMapPropertiesEXT const & 
fragmentDensityMapProperties = + properties2.get(); + std::cout << "\t" + << "FragmentDensityProperties:\n"; + std::cout << "\t\t" + << "fragmentDensityInvocations = " << !!fragmentDensityMapProperties.fragmentDensityInvocations + << "\n"; + std::cout << "\t\t" + << "maxFragmentDensityTexelSize = " << fragmentDensityMapProperties.maxFragmentDensityTexelSize.width + << " x " << fragmentDensityMapProperties.maxFragmentDensityTexelSize.height << "\n"; + std::cout << "\t\t" + << "minFragmentDensityTexelSize = " << fragmentDensityMapProperties.minFragmentDensityTexelSize.width + << " x " << fragmentDensityMapProperties.minFragmentDensityTexelSize.height << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceIDProperties const & idProperties = properties2.get(); + std::cout << "\t" + << "IDProperties:\n"; + std::cout << "\t\t" + << "deviceUUID = " << vk::su::UUID( idProperties.deviceUUID ) << "\n"; + std::cout << "\t\t" + << "driverUUID = " << vk::su::UUID( idProperties.driverUUID ) << "\n"; + std::cout << "\t\t" + << "deviceLUID = " << vk::su::LUID( idProperties.deviceLUID ) << "\n"; + std::cout << "\t\t" + << "deviceNodeMask = " << std::hex << idProperties.deviceNodeMask << std::dec << "\n"; + std::cout << "\t\t" + << "deviceLUIDValid = " << !!idProperties.deviceLUIDValid << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_EXT_inline_uniform_block" ) ) + { + vk::PhysicalDeviceInlineUniformBlockPropertiesEXT const & inlineUniformBlockProperties = + properties2.get(); + std::cout << "\t" + << "InlineUniformBlockProperties:\n"; + std::cout << "\t\t" + << "maxDescriptorSetInlineUniformBlocks = " + << inlineUniformBlockProperties.maxDescriptorSetInlineUniformBlocks << "\n"; + std::cout << "\t\t" + << "maxDescriptorSetUpdateAfterBindInlineUniformBlocks = " + << inlineUniformBlockProperties.maxDescriptorSetUpdateAfterBindInlineUniformBlocks << "\n"; + std::cout << "\t\t" + << "maxInlineUniformBlockSize = " + << 
inlineUniformBlockProperties.maxInlineUniformBlockSize << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorInlineUniformBlocks = " + << inlineUniformBlockProperties.maxPerStageDescriptorInlineUniformBlocks << "\n"; + std::cout << "\t\t" + << "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks = " + << inlineUniformBlockProperties.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_line_rasterization" ) ) + { + vk::PhysicalDeviceLineRasterizationPropertiesEXT const & lineRasterizationProperties = + properties2.get(); + std::cout << "\t" + << "LineRasterizationProperties:\n"; + std::cout << "\t\t" + << "lineSubPixelPrecisionBits = " << lineRasterizationProperties.lineSubPixelPrecisionBits << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceMaintenance3Properties const & maintenance3Properties = + properties2.get(); + std::cout << "\t" + << "Maintenance3Properties:\n"; + std::cout << "\t\t" + << "maxMemoryAllocationSize = " << maintenance3Properties.maxMemoryAllocationSize << "\n"; + std::cout << "\t\t" + << "maxPerSetDescriptors = " << maintenance3Properties.maxPerSetDescriptors << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_NV_mesh_shader" ) ) + { + vk::PhysicalDeviceMeshShaderPropertiesNV const & meshShaderProperties = + properties2.get(); + std::cout << "\t" + << "MeshShaderProperties:\n"; + std::cout << "\t\t" + << "maxDrawMeshTasksCount = " << meshShaderProperties.maxDrawMeshTasksCount << "\n"; + std::cout << "\t\t" + << "maxMeshMultiviewViewCount = " << meshShaderProperties.maxMeshMultiviewViewCount << "\n"; + std::cout << "\t\t" + << "maxMeshOutputPrimitives = " << meshShaderProperties.maxMeshOutputPrimitives << "\n"; + std::cout << "\t\t" + << "maxMeshOutputVertices = " << meshShaderProperties.maxMeshOutputVertices << "\n"; + std::cout << "\t\t" + << "maxMeshTotalMemorySize = " << 
meshShaderProperties.maxMeshTotalMemorySize << "\n"; + std::cout << "\t\t" + << "maxMeshWorkGroupInvocations = " << meshShaderProperties.maxMeshWorkGroupInvocations << "\n"; + std::cout << "\t\t" + << "maxMeshWorkGroupSize = " + << "[" << meshShaderProperties.maxMeshWorkGroupSize[0] << ", " + << meshShaderProperties.maxMeshWorkGroupSize[1] << ", " + << meshShaderProperties.maxMeshWorkGroupSize[2] << "]" + << "\n"; + std::cout << "\t\t" + << "maxTaskOutputCount = " << meshShaderProperties.maxTaskOutputCount << "\n"; + std::cout << "\t\t" + << "maxTaskTotalMemorySize = " << meshShaderProperties.maxTaskTotalMemorySize << "\n"; + std::cout << "\t\t" + << "maxTaskWorkGroupInvocations = " << meshShaderProperties.maxTaskWorkGroupInvocations << "\n"; + std::cout << "\t\t" + << "maxTaskWorkGroupSize = " + << "[" << meshShaderProperties.maxTaskWorkGroupSize[0] << ", " + << meshShaderProperties.maxTaskWorkGroupSize[1] << ", " + << meshShaderProperties.maxTaskWorkGroupSize[2] << "]" + << "\n"; + std::cout << "\t\t" + << "meshOutputPerPrimitiveGranularity = " << meshShaderProperties.meshOutputPerPrimitiveGranularity + << "\n"; + std::cout << "\t\t" + << "meshOutputPerVertexGranularity = " << meshShaderProperties.meshOutputPerVertexGranularity + << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NVX_multiview_per_view_attributes" ) ) + { + vk::PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const & multiviewPerViewAttributesProperties = + properties2.get(); + std::cout << "\t" + << "MultiviewPerViewAttributesProperties:\n"; + std::cout << "\t\t" + << "perViewPositionAllComponents = " + << !!multiviewPerViewAttributesProperties.perViewPositionAllComponents << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceMultiviewProperties const & multiviewProperties = + properties2.get(); + std::cout << "\t" + << "MultiviewProperties:\n"; + std::cout << "\t\t" + << "maxMultiviewInstanceIndex = " << multiviewProperties.maxMultiviewInstanceIndex << 
"\n"; + std::cout << "\t\t" + << "maxMultiviewViewCount = " << multiviewProperties.maxMultiviewViewCount << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_EXT_pci_bus_info" ) ) + { + vk::PhysicalDevicePCIBusInfoPropertiesEXT const & pciBusInfoProperties = + properties2.get(); + std::cout << "\t" + << "PCIBusInfoProperties:\n"; + std::cout << "\t\t" + << "pciDomain = " << pciBusInfoProperties.pciDomain << "\n"; + std::cout << "\t\t" + << "pciBus = " << pciBusInfoProperties.pciBus << "\n"; + std::cout << "\t\t" + << "pciDevice = " << pciBusInfoProperties.pciDevice << "\n"; + std::cout << "\t\t" + << "pciFunction = " << pciBusInfoProperties.pciFunction << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_maintenance2" ) ) + { + vk::PhysicalDevicePointClippingProperties const & pointClippingProperties = + properties2.get(); + std::cout << "\t" + << "PointClippingProperties:\n"; + std::cout << "\t\t" + << "pointClippingBehavior = " << vk::to_string( pointClippingProperties.pointClippingBehavior ) + << "\n"; + std::cout << "\n"; + } + + vk::PhysicalDeviceProtectedMemoryProperties const & protectedMemoryProperties = + properties2.get(); + std::cout << "\t" + << "ProtectedMemoryProperties:\n"; + std::cout << "\t\t" + << "protectedNoFault = " << !!protectedMemoryProperties.protectedNoFault << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_KHR_push_descriptor" ) ) + { + vk::PhysicalDevicePushDescriptorPropertiesKHR const & pushDescriptorProperties = + properties2.get(); + std::cout << "\t" + << "PushDescriptorProperties:\n"; + std::cout << "\t\t" + << "maxPushDescriptors = " << pushDescriptorProperties.maxPushDescriptors << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_ray_tracing" ) ) + { + vk::PhysicalDeviceRayTracingPropertiesNV const & rayTracingProperties = + properties2.get(); + std::cout << "\t" + << "RayTracingProperties:\n"; + 
std::cout << "\t\t" + << "maxDescriptorSetAccelerationStructures = " + << rayTracingProperties.maxDescriptorSetAccelerationStructures << "\n"; + std::cout << "\t\t" + << "maxGeometryCount = " << rayTracingProperties.maxGeometryCount << "\n"; + std::cout << "\t\t" + << "maxInstanceCount = " << rayTracingProperties.maxInstanceCount << "\n"; + std::cout << "\t\t" + << "maxRecursionDepth = " << rayTracingProperties.maxRecursionDepth << "\n"; + std::cout << "\t\t" + << "maxShaderGroupStride = " << rayTracingProperties.maxShaderGroupStride << "\n"; + std::cout << "\t\t" + << "maxTriangleCount = " << rayTracingProperties.maxTriangleCount << "\n"; + std::cout << "\t\t" + << "shaderGroupBaseAlignment = " << rayTracingProperties.shaderGroupBaseAlignment + << "\n"; + std::cout << "\t\t" + << "shaderGroupHandleSize = " << rayTracingProperties.shaderGroupHandleSize << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_sample_locations" ) ) + { + vk::PhysicalDeviceSampleLocationsPropertiesEXT const & sampleLocationProperties = + properties2.get(); + std::cout << "\t" + << "SampleLocationProperties:\n"; + std::cout << "\t\t" + << "maxSampleLocationGridSize = " << sampleLocationProperties.maxSampleLocationGridSize.width + << " x " << sampleLocationProperties.maxSampleLocationGridSize.height << "\n"; + std::cout << "\t\t" + << "sampleLocationCoordinateRange = " + << "[" << sampleLocationProperties.sampleLocationCoordinateRange[0] << ", " + << sampleLocationProperties.sampleLocationCoordinateRange[1] << "]" + << "\n"; + std::cout << "\t\t" + << "sampleLocationSampleCounts = " + << vk::to_string( sampleLocationProperties.sampleLocationSampleCounts ) << "\n"; + std::cout << "\t\t" + << "sampleLocationSubPixelBits = " << sampleLocationProperties.sampleLocationSubPixelBits << "\n"; + std::cout << "\t\t" + << "variableSampleLocations = " << !!sampleLocationProperties.variableSampleLocations << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( 
extensionProperties, "VK_EXT_sampler_filter_minmax" ) ) + { + vk::PhysicalDeviceSamplerFilterMinmaxPropertiesEXT const & samplerFilterMinmaxProperties = + properties2.get(); + std::cout << "\t" + << "SamplerFilterMinmaxProperties:\n"; + std::cout << "\t\t" + << "filterMinmaxImageComponentMapping = " + << !!samplerFilterMinmaxProperties.filterMinmaxImageComponentMapping << "\n"; + std::cout << "\t\t" + << "filterMinmaxSingleComponentFormats = " + << !!samplerFilterMinmaxProperties.filterMinmaxSingleComponentFormats << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_AMD_shader_core_properties2" ) ) + { + vk::PhysicalDeviceShaderCoreProperties2AMD const & shaderCoreProperties2 = + properties2.get(); + std::cout << "\t" + << "ShaderCoreProperties2:\n"; + std::cout << "\t\t" + << "activeComputeUnitCount = " << shaderCoreProperties2.activeComputeUnitCount << "\n"; + std::cout << "\t\t" + << "shaderCoreFeatures = " << vk::to_string( shaderCoreProperties2.shaderCoreFeatures ) << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_AMD_shader_core_properties" ) ) + { + vk::PhysicalDeviceShaderCorePropertiesAMD const & shaderCoreProperties = + properties2.get(); + std::cout << "\t" + << "ShaderCoreProperties:\n"; + std::cout << "\t\t" + << "computeUnitsPerShaderArray = " << shaderCoreProperties.computeUnitsPerShaderArray << "\n"; + std::cout << "\t\t" + << "maxSgprAllocation = " << shaderCoreProperties.maxSgprAllocation << "\n"; + std::cout << "\t\t" + << "maxVgprAllocation = " << shaderCoreProperties.maxVgprAllocation << "\n"; + std::cout << "\t\t" + << "minSgprAllocation = " << shaderCoreProperties.minSgprAllocation << "\n"; + std::cout << "\t\t" + << "minVgprAllocation = " << shaderCoreProperties.minVgprAllocation << "\n"; + std::cout << "\t\t" + << "sgprAllocationGranularity = " << shaderCoreProperties.sgprAllocationGranularity << "\n"; + std::cout << "\t\t" + << "sgprsPerSimd = " << 
shaderCoreProperties.sgprsPerSimd << "\n"; + std::cout << "\t\t" + << "shaderArraysPerEngineCount = " << shaderCoreProperties.shaderArraysPerEngineCount << "\n"; + std::cout << "\t\t" + << "shaderEngineCount = " << shaderCoreProperties.shaderEngineCount << "\n"; + std::cout << "\t\t" + << "simdPerComputeUnit = " << shaderCoreProperties.simdPerComputeUnit << "\n"; + std::cout << "\t\t" + << "vgprAllocationGranularity = " << shaderCoreProperties.vgprAllocationGranularity << "\n"; + std::cout << "\t\t" + << "vgprsPerSimd = " << shaderCoreProperties.vgprsPerSimd << "\n"; + std::cout << "\t\t" + << "wavefrontSize = " << shaderCoreProperties.wavefrontSize << "\n"; + std::cout << "\t\t" + << "wavefrontsPerSimd = " << shaderCoreProperties.wavefrontsPerSimd << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_shader_sm_builtins" ) ) + { + vk::PhysicalDeviceShaderSMBuiltinsPropertiesNV const & shaderSMBuiltinsProperties = + properties2.get(); + std::cout << "\t" + << "ShaderSMBuiltinsProperties:\n"; + std::cout << "\t\t" + << "shaderSMCount = " << shaderSMBuiltinsProperties.shaderSMCount << "\n"; + std::cout << "\t\t" + << "shaderWarpsPerSM = " << shaderSMBuiltinsProperties.shaderWarpsPerSM << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_NV_shading_rate_image" ) ) + { + vk::PhysicalDeviceShadingRateImagePropertiesNV const & shadingRageImageProperties = + properties2.get(); + std::cout << "\t" + << "ShadingRateImageProperties:\n"; + std::cout << "\t\t" + << "shadingRateMaxCoarseSamples = " << shadingRageImageProperties.shadingRateMaxCoarseSamples << "\n"; + std::cout << "\t\t" + << "shadingRatePaletteSize = " << shadingRageImageProperties.shadingRatePaletteSize << "\n"; + std::cout << "\t\t" + << "shadingRateTexelSize = " + << "[" << shadingRageImageProperties.shadingRateTexelSize.width << " x " + << shadingRageImageProperties.shadingRateTexelSize.height << "]" + << "\n"; + std::cout << "\n"; + } + + 
vk::PhysicalDeviceSubgroupProperties const & subgroupProperties = + properties2.get(); + std::cout << "\t" + << "SubgroupProperties:\n"; + std::cout << "\t\t" + << "quadOperationsInAllStages = " << !!subgroupProperties.quadOperationsInAllStages << "\n"; + std::cout << "\t\t" + << "subgroupSize = " << subgroupProperties.subgroupSize << "\n"; + std::cout << "\t\t" + << "supportedOperations = " << vk::to_string( subgroupProperties.supportedOperations ) << "\n"; + std::cout << "\t\t" + << "supportedStages = " << vk::to_string( subgroupProperties.supportedStages ) << "\n"; + std::cout << "\n"; + + if ( vk::su::contains( extensionProperties, "VK_EXT_subgroup_size_control" ) ) + { + vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT const & subgroupSizeControlProperties = + properties2.get(); + std::cout << "\t" + << "SubgroupSizeControlProperties:\n"; + std::cout << "\t\t" + << "maxComputeWorkgroupSubgroups = " << subgroupSizeControlProperties.maxComputeWorkgroupSubgroups + << "\n"; + std::cout << "\t\t" + << "maxSubgroupSize = " << subgroupSizeControlProperties.maxSubgroupSize << "\n"; + std::cout << "\t\t" + << "minSubgroupSize = " << subgroupSizeControlProperties.minSubgroupSize << "\n"; + std::cout << "\t\t" + << "requiredSubgroupSizeStages = " + << vk::to_string( subgroupSizeControlProperties.requiredSubgroupSizeStages ) << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_timeline_semaphore" ) ) + { + vk::PhysicalDeviceTimelineSemaphorePropertiesKHR const & timelineSemaphoreProperties = + properties2.get(); + std::cout << "\t" + << "TimelineSemaphoreProperties:\n"; + std::cout << "\t\t" + << "maxTimelineSemaphoreValueDifference = " + << timelineSemaphoreProperties.maxTimelineSemaphoreValueDifference << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_texel_buffer_alignment" ) ) + { + vk::PhysicalDeviceTexelBufferAlignmentPropertiesEXT const & texelBufferAlignmentProperties = + 
properties2.get(); + std::cout << "\t" + << "TexelBufferAlignmentProperties:\n"; + std::cout << "\t\t" + << "storageTexelBufferOffsetAlignmentBytes = " + << texelBufferAlignmentProperties.storageTexelBufferOffsetAlignmentBytes << "\n"; + std::cout << "\t\t" + << "storageTexelBufferOffsetSingleTexelAlignment = " + << !!texelBufferAlignmentProperties.storageTexelBufferOffsetSingleTexelAlignment << "\n"; + std::cout << "\t\t" + << "uniformTexelBufferOffsetAlignmentBytes = " + << texelBufferAlignmentProperties.uniformTexelBufferOffsetAlignmentBytes << "\n"; + std::cout << "\t\t" + << "uniformTexelBufferOffsetSingleTexelAlignment = " + << !!texelBufferAlignmentProperties.uniformTexelBufferOffsetSingleTexelAlignment << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_transform_feedback" ) ) + { + vk::PhysicalDeviceTransformFeedbackPropertiesEXT const & transformFeedbackProperties = + properties2.get(); + std::cout << "\t" + << "TransformFeedbackProperties:\n"; + std::cout << "\t\t" + << "maxTransformFeedbackBufferDataSize = " + << transformFeedbackProperties.maxTransformFeedbackBufferDataSize << "\n"; + std::cout << "\t\t" + << "maxTransformFeedbackBufferDataStride = " + << transformFeedbackProperties.maxTransformFeedbackBufferDataStride << "\n"; + std::cout << "\t\t" + << "maxTransformFeedbackBuffers = " + << transformFeedbackProperties.maxTransformFeedbackBuffers << "\n"; + std::cout << "\t\t" + << "maxTransformFeedbackBufferSize = " + << transformFeedbackProperties.maxTransformFeedbackBufferSize << "\n"; + std::cout << "\t\t" + << "maxTransformFeedbackStreamDataSize = " + << transformFeedbackProperties.maxTransformFeedbackStreamDataSize << "\n"; + std::cout << "\t\t" + << "maxTransformFeedbackStreams = " + << transformFeedbackProperties.maxTransformFeedbackStreams << "\n"; + std::cout << "\t\t" + << "transformFeedbackDraw = " + << !!transformFeedbackProperties.transformFeedbackDraw << "\n"; + std::cout << "\t\t" + << 
"transformFeedbackQueries = " + << !!transformFeedbackProperties.transformFeedbackQueries << "\n"; + std::cout << "\t\t" + << "transformFeedbackRasterizationStreamSelect = " + << !!transformFeedbackProperties.transformFeedbackRasterizationStreamSelect << "\n"; + std::cout << "\t\t" + << "transformFeedbackStreamsLinesTriangles = " + << !!transformFeedbackProperties.transformFeedbackStreamsLinesTriangles << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_vertex_attribute_divisor" ) ) + { + vk::PhysicalDeviceVertexAttributeDivisorPropertiesEXT const & vertexAttributeDivisorProperties = + properties2.get(); + std::cout << "\t" + << "VertexAttributeDivisorProperties:\n"; + std::cout << "\t\t" + << "maxVertexAttribDivisor = " << vertexAttributeDivisorProperties.maxVertexAttribDivisor << "\n"; + std::cout << "\n"; + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PhysicalDeviceQueueFamilyProperties/CMakeLists.txt b/RAII_Samples/PhysicalDeviceQueueFamilyProperties/CMakeLists.txt new file mode 100644 index 0000000..d1a7e57 --- /dev/null +++ b/RAII_Samples/PhysicalDeviceQueueFamilyProperties/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PhysicalDeviceQueueFamilyProperties) + +set(HEADERS +) + +set(SOURCES + PhysicalDeviceQueueFamilyProperties.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PhysicalDeviceQueueFamilyProperties + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PhysicalDeviceQueueFamilyProperties PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PhysicalDeviceQueueFamilyProperties PRIVATE utils) diff --git a/RAII_Samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp b/RAII_Samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp new file mode 100644 index 0000000..211657d --- /dev/null +++ b/RAII_Samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp @@ -0,0 +1,101 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : PhysicalDeviceQueueFamilyProperties +// Get queue family properties per physical device. + +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include +#include +#include + +static char const * AppName = "PhysicalDeviceQueueFamilyProperties"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + /* VULKAN_KEY_START */ + + std::cout << std::boolalpha; + for ( size_t i = 0; i < physicalDevices.size(); i++ ) + { + // some features are only valid, if a corresponding extension is available! 
+ std::vector extensionProperties = + physicalDevices[i].enumerateDeviceExtensionProperties(); + + std::cout << "PhysicalDevice " << i << " :" << std::endl; + + // need to explicitly specify all the template arguments for getQueueFamilyProperties2 to make the compiler happy + using Chain = vk::StructureChain; + auto queueFamilyProperties2 = physicalDevices[i].getQueueFamilyProperties2(); + for ( size_t j = 0; j < queueFamilyProperties2.size(); j++ ) + { + std::cout << "\tQueueFamily " << j << " :" << std::endl; + vk::QueueFamilyProperties const & properties = + queueFamilyProperties2[j].get().queueFamilyProperties; + std::cout << "\t\tQueueFamilyProperties:" << std::endl; + std::cout << "\t\t\tqueueFlags = " << vk::to_string( properties.queueFlags ) << std::endl; + std::cout << "\t\t\tqueueCount = " << properties.queueCount << std::endl; + std::cout << "\t\t\ttimestampValidBits = " << properties.timestampValidBits << std::endl; + std::cout << "\t\t\tminImageTransferGranularity = " << properties.minImageTransferGranularity.width << " x " + << properties.minImageTransferGranularity.height << " x " + << properties.minImageTransferGranularity.depth << std::endl; + std::cout << std::endl; + + if ( vk::su::contains( extensionProperties, "VK_NV_device_diagnostic_checkpoints" ) ) + { + vk::QueueFamilyCheckpointPropertiesNV const & checkpointProperties = + queueFamilyProperties2[j].get(); + std::cout << "\t\tCheckPointPropertiesNV:" << std::endl; + std::cout << "\t\t\tcheckpointExecutionStageMask = " + << vk::to_string( checkpointProperties.checkpointExecutionStageMask ) << std::endl; + std::cout << std::endl; + } + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PipelineCache/CMakeLists.txt b/RAII_Samples/PipelineCache/CMakeLists.txt new file mode 100644 index 0000000..c86d37f --- /dev/null +++ b/RAII_Samples/PipelineCache/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PipelineCache) + +set(HEADERS +) + +set(SOURCES + PipelineCache.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PipelineCache + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PipelineCache PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PipelineCache PRIVATE utils) diff --git a/RAII_Samples/PipelineCache/PipelineCache.cpp b/RAII_Samples/PipelineCache/PipelineCache.cpp new file mode 100644 index 0000000..11d95e8 --- /dev/null +++ b/RAII_Samples/PipelineCache/PipelineCache.cpp @@ -0,0 +1,416 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : PipelineCache +// This sample tries to save and reuse pipeline cache data between runs. + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include +#include + +// For timestamp code (getMilliseconds) +#ifdef WIN32 +# include +#else +# include +#endif + +typedef unsigned long long timestamp_t; +timestamp_t getMilliseconds() +{ +#ifdef WIN32 + LARGE_INTEGER frequency; + BOOL useQPC = QueryPerformanceFrequency( &frequency ); + if ( useQPC ) + { + LARGE_INTEGER now; + QueryPerformanceCounter( &now ); + return ( 1000LL * now.QuadPart ) / frequency.QuadPart; + } + else + { + return GetTickCount(); + } +#else + struct timeval now; + gettimeofday( &now, NULL ); + return ( now.tv_usec / 1000 ) + (timestamp_t)now.tv_sec; +#endif +} + +static char const * AppName = "PipelineCache"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) 
+ std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + vk::PhysicalDeviceProperties properties = physicalDevice->getProperties(); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = 
vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + std::unique_ptr descriptorPool = vk::raii::su::makeUniqueDescriptorPool( + *device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( *device, + *descriptorSet, + { { 
vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, nullptr } }, + { textureData } ); + + /* VULKAN_KEY_START */ + + // Check disk for existing cache data + size_t startCacheSize = 0; + char * startCacheData = nullptr; + + std::string cacheFileName = "pipeline_cache_data.bin"; + std::ifstream readCacheStream( cacheFileName, std::ios_base::in | std::ios_base::binary ); + if ( readCacheStream.good() ) + { + // Determine cache size + readCacheStream.seekg( 0, readCacheStream.end ); + startCacheSize = static_cast( readCacheStream.tellg() ); + readCacheStream.seekg( 0, readCacheStream.beg ); + + // Allocate memory to hold the initial cache data + startCacheData = (char *)std::malloc( startCacheSize ); + + // Read the data into our buffer + readCacheStream.read( startCacheData, startCacheSize ); + + // Clean up and print results + readCacheStream.close(); + std::cout << " Pipeline cache HIT!\n"; + std::cout << " cacheData loaded from " << cacheFileName << "\n"; + } + else + { + // No cache found on disk + std::cout << " Pipeline cache miss!\n"; + } + + if ( startCacheData != nullptr ) + { + // Check for cache validity + // + // TODO: Update this as the spec evolves. The fields are not defined by the header. 
+ // + // The code below supports SDK 0.10 Vulkan spec, which contains the following table: + // + // Offset Size Meaning + // ------ ------------ ------------------------------------------------------------------ + // 0 4 a device ID equal to VkPhysicalDeviceProperties::DeviceId written + // as a stream of bytes, with the least significant byte first + // + // 4 VK_UUID_SIZE a pipeline cache ID equal to VkPhysicalDeviceProperties::pipelineCacheUUID + // + // + // The code must be updated for latest Vulkan spec, which contains the following table: + // + // Offset Size Meaning + // ------ ------------ ------------------------------------------------------------------ + // 0 4 length in bytes of the entire pipeline cache header written as a + // stream of bytes, with the least significant byte first + // 4 4 a VkPipelineCacheHeaderVersion value written as a stream of bytes, + // with the least significant byte first + // 8 4 a vendor ID equal to VkPhysicalDeviceProperties::vendorID written + // as a stream of bytes, with the least significant byte first + // 12 4 a device ID equal to VkPhysicalDeviceProperties::deviceID written + // as a stream of bytes, with the least significant byte first + // 16 VK_UUID_SIZE a pipeline cache ID equal to VkPhysicalDeviceProperties::pipelineCacheUUID + + uint32_t headerLength = 0; + uint32_t cacheHeaderVersion = 0; + uint32_t vendorID = 0; + uint32_t deviceID = 0; + uint8_t pipelineCacheUUID[VK_UUID_SIZE] = {}; + + memcpy( &headerLength, (uint8_t *)startCacheData + 0, 4 ); + memcpy( &cacheHeaderVersion, (uint8_t *)startCacheData + 4, 4 ); + memcpy( &vendorID, (uint8_t *)startCacheData + 8, 4 ); + memcpy( &deviceID, (uint8_t *)startCacheData + 12, 4 ); + memcpy( pipelineCacheUUID, (uint8_t *)startCacheData + 16, VK_UUID_SIZE ); + + // Check each field and report bad values before freeing existing cache + bool badCache = false; + + if ( headerLength <= 0 ) + { + badCache = true; + std::cout << " Bad header length in " << 
cacheFileName << ".\n"; + std::cout << " Cache contains: " << std::hex << std::setw( 8 ) << headerLength << "\n"; + } + + if ( cacheHeaderVersion != VK_PIPELINE_CACHE_HEADER_VERSION_ONE ) + { + badCache = true; + std::cout << " Unsupported cache header version in " << cacheFileName << ".\n"; + std::cout << " Cache contains: " << std::hex << std::setw( 8 ) << cacheHeaderVersion << "\n"; + } + + if ( vendorID != properties.vendorID ) + { + badCache = true; + std::cout << " Vender ID mismatch in " << cacheFileName << ".\n"; + std::cout << " Cache contains: " << std::hex << std::setw( 8 ) << vendorID << "\n"; + std::cout << " Driver expects: " << std::hex << std::setw( 8 ) << properties.vendorID << "\n"; + } + + if ( deviceID != properties.deviceID ) + { + badCache = true; + std::cout << " Device ID mismatch in " << cacheFileName << ".\n"; + std::cout << " Cache contains: " << std::hex << std::setw( 8 ) << deviceID << "\n"; + std::cout << " Driver expects: " << std::hex << std::setw( 8 ) << properties.deviceID << "\n"; + } + + if ( memcmp( pipelineCacheUUID, properties.pipelineCacheUUID, sizeof( pipelineCacheUUID ) ) != 0 ) + { + badCache = true; + std::cout << " UUID mismatch in " << cacheFileName << ".\n"; + std::cout << " Cache contains: " << vk::su::UUID( pipelineCacheUUID ) << "\n"; + std::cout << " Driver expects: " << vk::su::UUID( properties.pipelineCacheUUID ) << "\n"; + } + + if ( badCache ) + { + // Don't submit initial cache data if any version info is incorrect + free( startCacheData ); + startCacheSize = 0; + startCacheData = nullptr; + + // And clear out the old cache file for use in next run + std::cout << " Deleting cache entry " << cacheFileName << " to repopulate.\n"; + if ( remove( cacheFileName.c_str() ) != 0 ) + { + std::cerr << "Reading error"; + exit( EXIT_FAILURE ); + } + } + } + + // Feed the initial cache data into cache creation + vk::PipelineCacheCreateInfo pipelineCacheCreateInfo( {}, startCacheSize, startCacheData ); + std::unique_ptr 
pipelineCache = + vk::raii::su::make_unique( *device, pipelineCacheCreateInfo ); + + // Free our initialData now that pipeline cache has been created + free( startCacheData ); + startCacheData = NULL; + + // Time (roughly) taken to create the graphics pipeline + timestamp_t start = getMilliseconds(); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + timestamp_t elapsed = getMilliseconds() - start; + std::cout << " vkCreateGraphicsPipeline time: " << (double)elapsed << " ms\n"; + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + + // Get the index of the next available swapchain image: + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + commandBuffer->beginRenderPass( + vk::RenderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, {} ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + 
static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + // Store away the cache that we've populated. This could conceivably happen + // earlier, depends on when the pipeline cache stops being populated + // internally. 
+ std::vector endCacheData = pipelineCache->getData(); + + // Write the file to disk, overwriting whatever was there + std::ofstream writeCacheStream( cacheFileName, std::ios_base::out | std::ios_base::binary ); + if ( writeCacheStream.good() ) + { + writeCacheStream.write( reinterpret_cast( endCacheData.data() ), endCacheData.size() ); + writeCacheStream.close(); + std::cout << " cacheData written to " << cacheFileName << "\n"; + } + else + { + // Something bad happened + std::cout << " Unable to write cache data to disk!\n"; + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PipelineDerivative/CMakeLists.txt b/RAII_Samples/PipelineDerivative/CMakeLists.txt new file mode 100644 index 0000000..0a8df17 --- /dev/null +++ b/RAII_Samples/PipelineDerivative/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_PipelineDerivative) + +set(HEADERS +) + +set(SOURCES + PipelineDerivative.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PipelineDerivative + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PipelineDerivative PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PipelineDerivative PRIVATE utils) diff --git a/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp b/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp new file mode 100644 index 0000000..b35ca4b --- /dev/null +++ b/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp @@ -0,0 +1,332 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : PipelineDerivative +// This sample creates pipeline derivative and draws with it. + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include + +static char const * AppName = "PipelineDerivative"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + 
graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::CheckerboardImageGenerator() ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + 
vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + std::unique_ptr descriptorPool = vk::raii::su::makeUniqueDescriptorPool( + *device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( *device, + *descriptorSet, + { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, nullptr } }, + { textureData } ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + + /* VULKAN_KEY_START */ + + // Create two pipelines. + // + // First pipeline has VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT set. + // Second pipeline has a modified fragment shader and sets the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag. + + std::array pipelineShaderStageCreateInfos = { + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, **vertexShaderModule, "main" ), + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, **fragmentShaderModule, "main" ) + }; + + vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( texturedCubeData[0] ) ); + std::array vertexInputAttributeDescriptions = { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32A32Sfloat, 0 ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32A32Sfloat, 16 ) + }; + vk::PipelineVertexInputStateCreateInfo pipelineVertexInputStateCreateInfo( + {}, vertexInputBindingDescription, vertexInputAttributeDescriptions ); + + vk::PipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateCreateInfo( + {}, vk::PrimitiveTopology::eTriangleList ); + + vk::PipelineViewportStateCreateInfo pipelineViewportStateCreateInfo( {}, 1, nullptr, 1, nullptr ); + + vk::PipelineRasterizationStateCreateInfo 
pipelineRasterizationStateCreateInfo( {}, + false, + false, + vk::PolygonMode::eFill, + vk::CullModeFlagBits::eBack, + vk::FrontFace::eClockwise, + false, + 0.0f, + 0.0f, + 0.0f, + 1.0f ); + + vk::PipelineMultisampleStateCreateInfo pipelineMultisampleStateCreateInfo( {}, vk::SampleCountFlagBits::e1 ); + + vk::StencilOpState stencilOpState( + vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::CompareOp::eAlways ); + vk::PipelineDepthStencilStateCreateInfo pipelineDepthStencilStateCreateInfo( + {}, true, true, vk::CompareOp::eLessOrEqual, false, false, stencilOpState, stencilOpState ); + + vk::ColorComponentFlags colorComponentFlags( vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | + vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA ); + vk::PipelineColorBlendAttachmentState pipelineColorBlendAttachmentState( false, + vk::BlendFactor::eZero, + vk::BlendFactor::eZero, + vk::BlendOp::eAdd, + vk::BlendFactor::eZero, + vk::BlendFactor::eZero, + vk::BlendOp::eAdd, + colorComponentFlags ); + vk::PipelineColorBlendStateCreateInfo pipelineColorBlendStateCreateInfo( + {}, false, vk::LogicOp::eNoOp, pipelineColorBlendAttachmentState, { { 1.0f, 1.0f, 1.0f, 1.0f } } ); + + std::array dynamicStates = { vk::DynamicState::eViewport, vk::DynamicState::eScissor }; + vk::PipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo( {}, dynamicStates ); + + vk::GraphicsPipelineCreateInfo graphicsPipelineCreateInfo( vk::PipelineCreateFlagBits::eAllowDerivatives, + pipelineShaderStageCreateInfos, + &pipelineVertexInputStateCreateInfo, + &pipelineInputAssemblyStateCreateInfo, + nullptr, + &pipelineViewportStateCreateInfo, + &pipelineRasterizationStateCreateInfo, + &pipelineMultisampleStateCreateInfo, + &pipelineDepthStencilStateCreateInfo, + &pipelineColorBlendStateCreateInfo, + &pipelineDynamicStateCreateInfo, + **pipelineLayout, + **renderPass ); + + std::unique_ptr basePipeline = + vk::raii::su::make_unique( *device, *pipelineCache, 
graphicsPipelineCreateInfo ); + switch ( basePipeline->getConstructorSuccessCode() ) + { + case vk::Result::eSuccess: break; + case vk::Result::ePipelineCompileRequiredEXT: + // something meaningfull here + break; + default: assert( false ); // should never happen + } + + // Now create the derivative pipeline, using a different fragment shader + // This shader will shade the cube faces with interpolated colors + const std::string fragmentShaderText_T_C_2 = R"( +#version 450 + +layout (location = 0) in vec2 inTexCoord; + +layout (location = 0) out vec4 outColor; + +void main() +{ + outColor = vec4(inTexCoord.x, inTexCoord.y, 1.0f - inTexCoord.x - inTexCoord.y, 1.0f); +} +)"; + + // Convert GLSL to SPIR-V + glslang::InitializeProcess(); + std::unique_ptr fragmentShaderModule2 = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C_2 ); + glslang::FinalizeProcess(); + + // Modify pipeline info to reflect derivation + pipelineShaderStageCreateInfos[1] = + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, **fragmentShaderModule2, "main" ); + graphicsPipelineCreateInfo.flags = vk::PipelineCreateFlagBits::eDerivative; + graphicsPipelineCreateInfo.basePipelineHandle = **basePipeline; + graphicsPipelineCreateInfo.basePipelineIndex = -1; + + // And create the derived pipeline + std::unique_ptr derivedPipeline = + vk::raii::su::make_unique( *device, *pipelineCache, graphicsPipelineCreateInfo ); + switch ( derivedPipeline->getConstructorSuccessCode() ) + { + case vk::Result::eSuccess: break; + case vk::Result::ePipelineCompileRequiredEXT: + // something meaningfull here + break; + default: assert( false ); // should never happen + } + + /* VULKAN_KEY_END */ + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + + // Get the index of the next available swapchain image + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex 
) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + commandBuffer->beginRenderPass( + vk::RenderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **derivedPipeline ); + commandBuffer->bindDescriptorSets( vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, {} ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned 
vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PushConstants/CMakeLists.txt b/RAII_Samples/PushConstants/CMakeLists.txt new file mode 100644 index 0000000..a6bebf4 --- /dev/null +++ b/RAII_Samples/PushConstants/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_PushConstants) + +set(HEADERS +) + +set(SOURCES + PushConstants.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PushConstants + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PushConstants PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PushConstants PRIVATE utils) diff --git a/RAII_Samples/PushConstants/PushConstants.cpp b/RAII_Samples/PushConstants/PushConstants.cpp new file mode 100644 index 0000000..4d8665b --- /dev/null +++ b/RAII_Samples/PushConstants/PushConstants.cpp @@ -0,0 +1,280 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : PushConstants +// Use push constants in a simple shader, validate the correct value was read. + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +#else +// unknow compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "PushConstants"; +static char const * EngineName = "Vulkan.hpp"; + +const std::string fragmentShaderText = R"( +#version 400 + +#extension GL_ARB_separate_shader_objects : enable +#extension GL_ARB_shading_language_420pack : enable + +layout (push_constant) uniform pushBlock +{ + int iFoo; + float fBar; +} pushConstantsBlock; + +layout (location = 0) in vec2 inTexCoords; + +layout (location = 0) out vec4 outColor; + +void main() +{ + vec4 green = vec4(0.0f, 1.0f, 0.0f, 1.0f); + vec4 red = vec4(1.0f, 0.0f, 0.0f, 1.0f); + + // Start with passing color + vec4 resColor = green; + + // See if we've read in the correct push constants + if ((pushConstantsBlock.iFoo != 2) || (pushConstantsBlock.fBar != 1.0f)) + { + resColor = red; + } + + // Create a border to see the cube more easily + if ((inTexCoords.x < 0.01f) || (0.99f < inTexCoords.x) + || (inTexCoords.y < 0.01f) || (0.99f < inTexCoords.y)) + { + resColor *= vec4(0.1f, 0.1f, 0.1f, 1.0f); + } + + outColor = resColor; +} +)"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, 
*surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); + glslang::FinalizeProcess(); + + 
std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + // Create binding and layout for the following, matching contents of shader + // binding 0 = uniform buffer (MVP) + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + + /* VULKAN_KEY_START */ + + // Set up our push constant range, which mirrors the declaration of + vk::PushConstantRange pushConstantRanges( vk::ShaderStageFlagBits::eFragment, 0, 8 ); + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, **descriptorSetLayout, pushConstantRanges ); + std::unique_ptr pipelineLayout = + vk::raii::su::make_unique( *device, pipelineLayoutCreateInfo ); + + // Create a single pool to contain data for our descriptor set + std::array poolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), + vk::DescriptorPoolSize( + vk::DescriptorType::eCombinedImageSampler, 1 ) }; + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSizes ); + std::unique_ptr descriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + + // Populate descriptor sets + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, **descriptorSetLayout ); + std::unique_ptr descriptorSet = vk::raii::su::make_unique( + std::move( vk::raii::DescriptorSets( *device, descriptorSetAllocateInfo ).front() ) ); + + // Populate with info about our uniform buffer for MVP + vk::DescriptorBufferInfo bufferInfo( 
**uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::WriteDescriptorSet writeDescriptorSet( + **descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ); + device->updateDescriptorSets( writeDescriptorSet, nullptr ); + + // Create our push constant data, which matches shader expectations + std::array pushConstants = { { (unsigned)2, (unsigned)0x3F800000 } }; + + // Ensure we have enough room for push constant data + assert( ( sizeof( pushConstants ) <= physicalDevice->getProperties().limits.maxPushConstantsSize ) && + "Too many push constants" ); + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer->pushConstants( **pipelineLayout, vk::ShaderStageFlagBits::eFragment, 0, pushConstants ); + + /* VULKAN_KEY_END */ + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + 
commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/PushDescriptors/CMakeLists.txt b/RAII_Samples/PushDescriptors/CMakeLists.txt new file mode 100644 index 0000000..6178ad1 --- /dev/null +++ b/RAII_Samples/PushDescriptors/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_PushDescriptors) + +set(HEADERS +) + +set(SOURCES + PushDescriptors.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_PushDescriptors + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_PushDescriptors PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_PushDescriptors PRIVATE utils) diff --git a/RAII_Samples/PushDescriptors/PushDescriptors.cpp b/RAII_Samples/PushDescriptors/PushDescriptors.cpp new file mode 100644 index 0000000..aa7ce31 --- /dev/null +++ b/RAII_Samples/PushDescriptors/PushDescriptors.cpp @@ -0,0 +1,247 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : PushDescriptors +// Use Push Descriptors to Draw Textured Cube + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "PushDescriptors"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + /* VULKAN_KEY_START */ + + // To use PUSH_DESCRIPTOR, you must also specify GET_PHYSICAL_DEVICE_PROPERTIES_2 + std::vector extensionProperties = context->enumerateInstanceExtensionProperties(); + auto propertyIterator = + std::find_if( extensionProperties.begin(), extensionProperties.end(), []( vk::ExtensionProperties ep ) { + return ( strcmp( ep.extensionName, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ) == 0 ); + } ); + if ( propertyIterator == extensionProperties.end() ) + { + std::cout << "No GET_PHYSICAL_DEVICE_PROPERTIES_2 extension" << std::endl; + return 0; + } + + std::vector instanceExtensions = vk::su::getInstanceExtensions(); + instanceExtensions.push_back( VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ); + + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, instanceExtensions ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr 
physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + // Once instance is created, need to make sure the extension is available + extensionProperties = physicalDevice->enumerateDeviceExtensionProperties(); + propertyIterator = + std::find_if( extensionProperties.begin(), extensionProperties.end(), []( vk::ExtensionProperties ep ) { + return ( strcmp( ep.extensionName, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME ) == 0 ); + } ); + if ( propertyIterator == extensionProperties.end() ) + { + std::cout << "No extension for push descriptors" << std::endl; + return 0; + } + + std::vector deviceExtensions = vk::su::getDeviceExtensions(); + deviceExtensions.push_back( VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, deviceExtensions ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + 
vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::CheckerboardImageGenerator() ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + // Need to specify that descriptor set layout will be for push descriptors + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } }, + vk::DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + 
*vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + + vk::DescriptorBufferInfo bufferInfo( **uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( + **textureData.sampler, **textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::WriteDescriptorSet writeDescriptorSets[2] = { + vk::WriteDescriptorSet( {}, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( {}, 1, 0, 
vk::DescriptorType::eCombinedImageSampler, imageInfo ) + }; + + // this call is from an extension and needs the dynamic dispatcher !! + commandBuffer->pushDescriptorSetKHR( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { 2, writeDescriptorSets } ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/RayTracing/CMakeLists.txt b/RAII_Samples/RayTracing/CMakeLists.txt new file mode 100644 index 0000000..e06bd58 --- /dev/null +++ b/RAII_Samples/RayTracing/CMakeLists.txt @@ -0,0 +1,43 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_RayTracing) + +set(HEADERS + CameraManipulator.hpp +) + +set(SOURCES + CameraManipulator.cpp + RayTracing.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_RayTracing + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_RayTracing PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_RayTracing PRIVATE utils) +target_include_directories(RAII_RayTracing PUBLIC + ${CMAKE_SOURCE_DIR}/samples/RayTracing/common + ${CMAKE_SOURCE_DIR}/samples/RayTracing/vulkannv + ${CMAKE_SOURCE_DIR}/stb + ${CMAKE_SOURCE_DIR}/tinyobjloader + ) diff --git a/RAII_Samples/RayTracing/CameraManipulator.cpp b/RAII_Samples/RayTracing/CameraManipulator.cpp new file mode 100644 index 0000000..1ce8348 --- /dev/null +++ b/RAII_Samples/RayTracing/CameraManipulator.cpp @@ -0,0 +1,439 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// ignore warning 4127: conditional expression is constant +#if defined( _MSC_VER ) +# pragma warning( disable : 4127 ) +#elif defined( __clang__ ) +# if ( 10 <= __clang_major__ ) +# pragma clang diagnostic ignored "-Wdeprecated-volatile" // to keep glm/detail/type_half.inl compiling +# endif +#elif defined( __GNUC__ ) +// don't know how to switch off that warning here +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "CameraManipulator.hpp" + +#include +#include + +namespace vk +{ + namespace su + { + const float trackballSize = 0.8f; + + //----------------------------------------------------------------------------- + // MATH functions + // + template + bool isZero( const T & _a ) + { + return fabs( _a ) < std::numeric_limits::epsilon(); + } + template + bool isOne( const T & _a ) + { + return areEqual( _a, (T)1 ); + } + inline float sign( float s ) + { + return ( s < 0.f ) ? 
-1.f : 1.f; + } + + CameraManipulator::CameraManipulator() + { + update(); + } + + glm::vec3 const & CameraManipulator::getCameraPosition() const + { + return m_cameraPosition; + } + + glm::vec3 const & CameraManipulator::getCenterPosition() const + { + return m_centerPosition; + } + + glm::mat4 const & CameraManipulator::getMatrix() const + { + return m_matrix; + } + + CameraManipulator::Mode CameraManipulator::getMode() const + { + return m_mode; + } + + glm::ivec2 const & CameraManipulator::getMousePosition() const + { + return m_mousePosition; + } + + float CameraManipulator::getRoll() const + { + return m_roll; + } + + float CameraManipulator::getSpeed() const + { + return m_speed; + } + + glm::vec3 const & CameraManipulator::getUpVector() const + { + return m_upVector; + } + + glm::u32vec2 const & CameraManipulator::getWindowSize() const + { + return m_windowSize; + } + + CameraManipulator::Action + CameraManipulator::mouseMove( glm::ivec2 const & position, MouseButton mouseButton, ModifierFlags & modifiers ) + { + Action curAction = Action::None; + switch ( mouseButton ) + { + case MouseButton::Left: + if ( ( ( modifiers & ModifierFlagBits::Ctrl ) && ( modifiers & ModifierFlagBits::Shift ) ) || + ( modifiers & ModifierFlagBits::Alt ) ) + { + curAction = m_mode == Mode::Examine ? Action::LookAround : Action::Orbit; + } + else if ( modifiers & ModifierFlagBits::Shift ) + { + curAction = Action::Dolly; + } + else if ( modifiers & ModifierFlagBits::Ctrl ) + { + curAction = Action::Pan; + } + else + { + curAction = m_mode == Mode::Examine ? 
Action::Orbit : Action::LookAround; + } + break; + case MouseButton::Middle: curAction = Action::Pan; break; + case MouseButton::Right: curAction = Action::Dolly; break; + default: assert( false ); + } + assert( curAction != Action::None ); + motion( position, curAction ); + + return curAction; + } + + void CameraManipulator::setLookat( const glm::vec3 & cameraPosition, + const glm::vec3 & centerPosition, + const glm::vec3 & upVector ) + { + m_cameraPosition = cameraPosition; + m_centerPosition = centerPosition; + m_upVector = upVector; + update(); + } + + void CameraManipulator::setMode( Mode mode ) + { + m_mode = mode; + } + + void CameraManipulator::setMousePosition( glm::ivec2 const & position ) + { + m_mousePosition = position; + } + + void CameraManipulator::setRoll( float roll ) + { + m_roll = roll; + update(); + } + + void CameraManipulator::setSpeed( float speed ) + { + m_speed = speed; + } + + void CameraManipulator::setWindowSize( glm::ivec2 const & size ) + { + m_windowSize = size; + } + + void CameraManipulator::wheel( int value ) + { + float fValue = static_cast( value ); + float dx = ( fValue * std::abs( fValue ) ) / static_cast( m_windowSize[0] ); + + glm::vec3 z = m_cameraPosition - m_centerPosition; + float length = z.length() * 0.1f; + length = length < 0.001f ? 0.001f : length; + + dx *= m_speed; + dolly( glm::vec2( dx, dx ) ); + update(); + } + + void CameraManipulator::dolly( glm::vec2 const & delta ) + { + glm::vec3 z = m_centerPosition - m_cameraPosition; + float length = glm::length( z ); + + // We are at the point of interest, and don't know any direction, so do nothing! + if ( isZero( length ) ) + { + return; + } + + // Use the larger movement. + float dd; + if ( m_mode != Mode::Examine ) + { + dd = -delta[1]; + } + else + { + dd = fabs( delta[0] ) > fabs( delta[1] ) ? delta[0] : -delta[1]; + } + + float factor = m_speed * dd / length; + + // Adjust speed based on distance. + length /= 10; + length = length < 0.001f ? 
0.001f : length; + factor *= length; + + // Don't move to or through the point of interest. + if ( 1.0f <= factor ) + { + return; + } + + z *= factor; + + // Not going up + if ( m_mode == Mode::Walk ) + { + if ( m_upVector.y > m_upVector.z ) + { + z.y = 0; + } + else + { + z.z = 0; + } + } + + m_cameraPosition += z; + + // In fly mode, the interest moves with us. + if ( m_mode != Mode::Examine ) + { + m_centerPosition += z; + } + } + + void CameraManipulator::motion( glm::ivec2 const & position, Action action ) + { + glm::vec2 delta( float( position[0] - m_mousePosition[0] ) / float( m_windowSize[0] ), + float( position[1] - m_mousePosition[1] ) / float( m_windowSize[1] ) ); + + switch ( action ) + { + case Action::Orbit: + if ( m_mode == Mode::Trackball ) + { + orbit( delta, true ); // trackball(position); + } + else + { + orbit( delta, false ); + } + break; + case Action::Dolly: dolly( delta ); break; + case Action::Pan: pan( delta ); break; + case Action::LookAround: + if ( m_mode == Mode::Trackball ) + { + trackball( position ); + } + else + { + orbit( glm::vec2( delta[0], -delta[1] ), true ); + } + break; + default: break; + } + + update(); + + m_mousePosition = position; + } + + void CameraManipulator::orbit( glm::vec2 const & delta, bool invert ) + { + if ( isZero( delta[0] ) && isZero( delta[1] ) ) + { + return; + } + + // Full width will do a full turn + float dx = delta[0] * float( glm::two_pi() ); + float dy = delta[1] * float( glm::two_pi() ); + + // Get the camera + glm::vec3 origin( invert ? m_cameraPosition : m_centerPosition ); + glm::vec3 position( invert ? 
m_centerPosition : m_cameraPosition ); + + // Get the length of sight + glm::vec3 centerToEye( position - origin ); + float radius = glm::length( centerToEye ); + centerToEye = glm::normalize( centerToEye ); + + // Find the rotation around the UP axis (Y) + glm::vec3 zAxis( centerToEye ); + glm::mat4 yRotation = glm::rotate( -dx, m_upVector ); + + // Apply the (Y) rotation to the eye-center vector + glm::vec4 tmpVector = yRotation * glm::vec4( centerToEye.x, centerToEye.y, centerToEye.z, 0.0f ); + centerToEye = glm::vec3( tmpVector.x, tmpVector.y, tmpVector.z ); + + // Find the rotation around the X vector: cross between eye-center and up (X) + glm::vec3 xAxis = glm::cross( m_upVector, zAxis ); + xAxis = glm::normalize( xAxis ); + glm::mat4 xRotation = glm::rotate( -dy, xAxis ); + + // Apply the (X) rotation to the eye-center vector + tmpVector = xRotation * glm::vec4( centerToEye.x, centerToEye.y, centerToEye.z, 0 ); + glm::vec3 rotatedVector( tmpVector.x, tmpVector.y, tmpVector.z ); + if ( sign( rotatedVector.x ) == sign( centerToEye.x ) ) + { + centerToEye = rotatedVector; + } + + // Make the vector as long as it was originally + centerToEye *= radius; + + // Finding the new position + glm::vec3 newPosition = centerToEye + origin; + + if ( !invert ) + { + m_cameraPosition = newPosition; // Normal: change the position of the camera + } + else + { + m_centerPosition = newPosition; // Inverted: change the interest point + } + } + + void CameraManipulator::pan( glm::vec2 const & delta ) + { + glm::vec3 z( m_cameraPosition - m_centerPosition ); + float length = static_cast( glm::length( z ) ) / 0.785f; // 45 degrees + z = glm::normalize( z ); + glm::vec3 x = glm::normalize( glm::cross( m_upVector, z ) ); + glm::vec3 y = glm::normalize( glm::cross( z, x ) ); + x *= -delta[0] * length; + y *= delta[1] * length; + + if ( m_mode == Mode::Fly ) + { + x = -x; + y = -y; + } + + m_cameraPosition += x + y; + m_centerPosition += x + y; + } + + double 
CameraManipulator::projectOntoTBSphere( const glm::vec2 & p ) + { + double z; + double d = length( p ); + if ( d < trackballSize * 0.70710678118654752440 ) + { + // inside sphere + z = sqrt( trackballSize * trackballSize - d * d ); + } + else + { + // on hyperbola + double t = trackballSize / 1.41421356237309504880; + z = t * t / d; + } + + return z; + } + + void CameraManipulator::trackball( glm::ivec2 const & position ) + { + glm::vec2 p0( 2 * ( m_mousePosition[0] - m_windowSize[0] / 2 ) / double( m_windowSize[0] ), + 2 * ( m_windowSize[1] / 2 - m_mousePosition[1] ) / double( m_windowSize[1] ) ); + glm::vec2 p1( 2 * ( position[0] - m_windowSize[0] / 2 ) / double( m_windowSize[0] ), + 2 * ( m_windowSize[1] / 2 - position[1] ) / double( m_windowSize[1] ) ); + + // determine the z coordinate on the sphere + glm::vec3 pTB0( p0[0], p0[1], projectOntoTBSphere( p0 ) ); + glm::vec3 pTB1( p1[0], p1[1], projectOntoTBSphere( p1 ) ); + + // calculate the rotation axis via cross product between p0 and p1 + glm::vec3 axis = glm::cross( pTB0, pTB1 ); + axis = glm::normalize( axis ); + + // calculate the angle + float t = glm::length( pTB0 - pTB1 ) / ( 2.f * trackballSize ); + + // clamp between -1 and 1 + if ( t > 1.0f ) + { + t = 1.0f; + } + else if ( t < -1.0f ) + { + t = -1.0f; + } + + float rad = 2.0f * asin( t ); + + { + glm::vec4 rot_axis = m_matrix * glm::vec4( axis, 0 ); + glm::mat4 rot_mat = glm::rotate( rad, glm::vec3( rot_axis.x, rot_axis.y, rot_axis.z ) ); + + glm::vec3 pnt = m_cameraPosition - m_centerPosition; + glm::vec4 pnt2 = rot_mat * glm::vec4( pnt.x, pnt.y, pnt.z, 1 ); + m_cameraPosition = m_centerPosition + glm::vec3( pnt2.x, pnt2.y, pnt2.z ); + glm::vec4 up2 = rot_mat * glm::vec4( m_upVector.x, m_upVector.y, m_upVector.z, 0 ); + m_upVector = glm::vec3( up2.x, up2.y, up2.z ); + } + } + + void CameraManipulator::update() + { + m_matrix = glm::lookAt( m_cameraPosition, m_centerPosition, m_upVector ); + + if ( !isZero( m_roll ) ) + { + glm::mat4 rot = 
glm::rotate( m_roll, glm::vec3( 0, 0, 1 ) ); + m_matrix = m_matrix * rot; + } + } + + } // namespace su +} // namespace vk diff --git a/RAII_Samples/RayTracing/CameraManipulator.hpp b/RAII_Samples/RayTracing/CameraManipulator.hpp new file mode 100644 index 0000000..5de9daa --- /dev/null +++ b/RAII_Samples/RayTracing/CameraManipulator.hpp @@ -0,0 +1,89 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#pragma once + +#if defined( _MSC_VER ) +# pragma warning( push ) +# pragma warning( disable : 4127 ) // conditional expression is constant (glm) +#endif + +#include + +#if defined( _MSC_VER ) +# pragma warning( pop ) +#endif + +#include + +namespace vk +{ + namespace su + { + class CameraManipulator + { + public: + enum class Action { None, Orbit, Dolly, Pan, LookAround }; + enum class Mode { Examine, Fly, Walk, Trackball }; + enum class MouseButton { None, Left, Middle, Right }; + enum class ModifierFlagBits : uint32_t { Shift = 1, Ctrl = 2, Alt = 4 }; + using ModifierFlags = vk::Flags; + + public: + CameraManipulator(); + + glm::vec3 const& getCameraPosition() const; + glm::vec3 const& getCenterPosition() const; + glm::mat4 const& getMatrix() const; + Mode getMode() const; + glm::ivec2 const& getMousePosition() const; + float getRoll() const; + float getSpeed() const; + glm::vec3 const& getUpVector() const; + glm::u32vec2 const& getWindowSize() const; + Action mouseMove(glm::ivec2 const& 
position, MouseButton mouseButton, ModifierFlags & modifiers); + void setLookat(const glm::vec3& cameraPosition, const glm::vec3& centerPosition, const glm::vec3& upVector); + void setMode(Mode mode); + void setMousePosition(glm::ivec2 const& position); + void setRoll(float roll); // roll in radians + void setSpeed(float speed); + void setWindowSize(glm::ivec2 const& size); + void wheel(int value); + + private: + void dolly(glm::vec2 const& delta); + void motion(glm::ivec2 const& position, Action action = Action::None); + void orbit(glm::vec2 const& delta, bool invert = false); + void pan(glm::vec2 const& delta); + double projectOntoTBSphere(const glm::vec2& p); + void trackball(glm::ivec2 const& position); + void update(); + + private: + glm::vec3 m_cameraPosition = glm::vec3(10, 10, 10); + glm::vec3 m_centerPosition = glm::vec3(0, 0, 0); + glm::vec3 m_upVector = glm::vec3(0, 1, 0); + float m_roll = 0; // Rotation around the Z axis in RAD + glm::mat4 m_matrix = glm::mat4(1); + + glm::u32vec2 m_windowSize = glm::u32vec2(1, 1); + + float m_speed = 30.0f; + glm::ivec2 m_mousePosition = glm::ivec2(0, 0); + + Mode m_mode = Mode::Examine; + }; + } // namespace su +} // namespace vk diff --git a/RAII_Samples/RayTracing/RayTracing.cpp b/RAII_Samples/RayTracing/RayTracing.cpp new file mode 100644 index 0000000..d55cfd5 --- /dev/null +++ b/RAII_Samples/RayTracing/RayTracing.cpp @@ -0,0 +1,1390 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : RayTracing +// Simple sample how to ray trace using Vulkan + +#if defined( _MSC_VER ) +# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed + // to get glm/detail/type_vec?.hpp without warnings +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +# if ( 10 <= __clang_major__ ) +# pragma clang diagnostic ignored "-Wdeprecated-volatile" // to keep glm/detail/type_half.inl compiling +# endif +#elif defined( __GNUC__ ) +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +// clang-format off +// we need to include vulkan.hpp before glfw3.h, so stop clang-format to reorder them +#include +#include +// clang-format on +#include +#include +#include + +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#define GLM_FORCE_RADIANS +#define GLM_ENABLE_EXPERIMENTAL +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "CameraManipulator.hpp" +#include "SPIRV/GlslangToSpv.h" + +#include +#include +#include + +static char const * AppName = "RayTracing"; +static char const * EngineName = "Vulkan.hpp"; + +struct GeometryInstanceData +{ + GeometryInstanceData( glm::mat4x4 const & transform_, + uint32_t instanceID_, + uint8_t mask_, + uint32_t instanceOffset_, + uint8_t flags_, + uint64_t accelerationStructureHandle_ ) + : instanceId( instanceID_ ) + , mask( mask_ ) + , instanceOffset( instanceOffset_ ) + , flags( flags_ ) + , accelerationStructureHandle( accelerationStructureHandle_ ) + { + assert( !( instanceID_ & 0xFF000000 ) && !( instanceOffset_ & 0xFF000000 ) ); + memcpy( transform, &transform_, 12 * sizeof( float ) ); + } + + float transform[12]; // Transform matrix, containing only the top 3 rows + uint32_t instanceId : 24; // Instance index + uint32_t mask : 8; // Visibility mask + uint32_t instanceOffset : 24; 
// Index of the hit group which will be invoked when a ray hits the instance + uint32_t flags : 8; // Instance flags, such as culling + uint64_t accelerationStructureHandle; // Opaque handle of the bottom-level acceleration structure +}; +static_assert( sizeof( GeometryInstanceData ) == 64, "GeometryInstanceData structure compiles to incorrect size" ); + +struct AccelerationStructureData +{ + std::shared_ptr acclerationStructure; + std::unique_ptr scratchBufferData; + std::unique_ptr resultBufferData; + std::unique_ptr instanceBufferData; +}; + +AccelerationStructureData createAccelerationStructureData( + vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::raii::CommandBuffer const & commandBuffer, + std::vector, glm::mat4x4>> const & instances, + std::vector const & geometries ) +{ + assert( instances.empty() ^ geometries.empty() ); + + AccelerationStructureData accelerationStructureData; + + vk::AccelerationStructureTypeNV accelerationStructureType = + instances.empty() ? 
vk::AccelerationStructureTypeNV::eBottomLevel : vk::AccelerationStructureTypeNV::eTopLevel; + vk::AccelerationStructureInfoNV accelerationStructureInfo( + accelerationStructureType, {}, vk::su::checked_cast( instances.size() ), geometries ); + vk::AccelerationStructureCreateInfoNV accelerationStructureCreateInfoNV( 0, accelerationStructureInfo ); + accelerationStructureData.acclerationStructure = + std::make_shared( device, accelerationStructureCreateInfoNV ); + + vk::AccelerationStructureMemoryRequirementsInfoNV objectRequirements( + vk::AccelerationStructureMemoryRequirementsTypeNV::eObject, **accelerationStructureData.acclerationStructure ); + vk::DeviceSize resultSizeInBytes = + device.getAccelerationStructureMemoryRequirementsNV( objectRequirements ).memoryRequirements.size; + assert( 0 < resultSizeInBytes ); + accelerationStructureData.resultBufferData = + vk::raii::su::make_unique( physicalDevice, + device, + resultSizeInBytes, + vk::BufferUsageFlagBits::eRayTracingNV, + vk::MemoryPropertyFlagBits::eDeviceLocal ); + + vk::AccelerationStructureMemoryRequirementsInfoNV buildScratchRequirements( + vk::AccelerationStructureMemoryRequirementsTypeNV::eBuildScratch, + **accelerationStructureData.acclerationStructure ); + vk::AccelerationStructureMemoryRequirementsInfoNV updateScratchRequirements( + vk::AccelerationStructureMemoryRequirementsTypeNV::eUpdateScratch, + **accelerationStructureData.acclerationStructure ); + vk::DeviceSize scratchSizeInBytes = std::max( + device.getAccelerationStructureMemoryRequirementsNV( buildScratchRequirements ).memoryRequirements.size, + device.getAccelerationStructureMemoryRequirementsNV( updateScratchRequirements ).memoryRequirements.size ); + assert( 0 < scratchSizeInBytes ); + + accelerationStructureData.scratchBufferData = + vk::raii::su::make_unique( physicalDevice, + device, + scratchSizeInBytes, + vk::BufferUsageFlagBits::eRayTracingNV, + vk::MemoryPropertyFlagBits::eDeviceLocal ); + + if ( !instances.empty() ) + { + 
accelerationStructureData.instanceBufferData = + vk::raii::su::make_unique( physicalDevice, + device, + instances.size() * sizeof( GeometryInstanceData ), + vk::BufferUsageFlagBits::eRayTracingNV ); + + std::vector geometryInstanceData; + for ( size_t i = 0; i < instances.size(); i++ ) + { + uint64_t accelerationStructureHandle = instances[i].first->getHandle(); + + // For each instance we set its instance index to its index i in the instance vector, and set + // its hit group index to 2*i. The hit group index defines which entry of the shader binding + // table will contain the hit group to be executed when hitting this instance. We set this + // index to 2*i due to the use of 2 types of rays in the scene: the camera rays and the shadow + // rays. For each instance, the SBT will then have 2 hit groups + geometryInstanceData.emplace_back( glm::transpose( instances[i].second ), + static_cast( i ), + static_cast( 0xFF ), + static_cast( 2 * i ), + static_cast( vk::GeometryInstanceFlagBitsNV::eTriangleCullDisable ), + accelerationStructureHandle ); + } + accelerationStructureData.instanceBufferData->upload( geometryInstanceData ); + } + + device.bindAccelerationStructureMemoryNV( vk::BindAccelerationStructureMemoryInfoNV( + **accelerationStructureData.acclerationStructure, **accelerationStructureData.resultBufferData->deviceMemory ) ); + + vk::Buffer instanceData; + if ( accelerationStructureData.instanceBufferData ) + { + instanceData = **accelerationStructureData.instanceBufferData->buffer; + } + vk::AccelerationStructureInfoNV accelerationStructureInfoNV( + accelerationStructureType, {}, vk::su::checked_cast( instances.size() ), geometries ); + commandBuffer.buildAccelerationStructureNV( accelerationStructureInfoNV, + instanceData, + 0, + false, + **accelerationStructureData.acclerationStructure, + nullptr, + **accelerationStructureData.scratchBufferData->buffer, + 0 ); + + vk::MemoryBarrier memoryBarrier( + vk::AccessFlagBits::eAccelerationStructureWriteNV | 
vk::AccessFlagBits::eAccelerationStructureReadNV, + vk::AccessFlagBits::eAccelerationStructureWriteNV | vk::AccessFlagBits::eAccelerationStructureReadNV ); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eAccelerationStructureBuildNV, + vk::PipelineStageFlagBits::eAccelerationStructureBuildNV, + {}, + memoryBarrier, + {}, + {} ); + + return accelerationStructureData; +} + +struct PerFrameData +{ + std::unique_ptr commandPool; + std::unique_ptr commandBuffer; + std::unique_ptr fence; + std::unique_ptr presentCompleteSemaphore; + std::unique_ptr renderCompleteSemaphore; +}; + +struct UniformBufferObject +{ + glm::mat4 model; + glm::mat4 view; + glm::mat4 proj; + glm::mat4 modelIT; + glm::mat4 viewInverse; + glm::mat4 projInverse; +}; + +struct Material +{ + glm::vec3 diffuse = glm::vec3( 0.7f, 0.7f, 0.7f ); + int textureID = -1; +}; +const size_t MaterialStride = ( ( sizeof( Material ) + 15 ) / 16 ) * 16; + +struct Vertex +{ + Vertex( glm::vec3 const & p, glm::vec3 const & n, glm::vec2 const & tc, int m = 0 ) + : pos( p ), nrm( n ), texCoord( tc ), matID( m ) + {} + + glm::vec3 pos; + glm::vec3 nrm; + glm::vec2 texCoord; + int matID; +}; +const size_t VertexStride = ( ( sizeof( Vertex ) + 15 ) / 16 ) * 16; + +static const std::vector cubeData = { + // pos nrm texcoord matID + // front face + { Vertex( glm::vec3( -1.0f, -1.0f, 1.0f ), glm::vec3( 0.0f, 0.0f, 1.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, 1.0f ), glm::vec3( 0.0f, 0.0f, 1.0f ), glm::vec2( 1.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, 1.0f ), glm::vec3( 0.0f, 0.0f, 1.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, 1.0f ), glm::vec3( 0.0f, 0.0f, 1.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, 1.0f ), glm::vec3( 0.0f, 0.0f, 1.0f ), glm::vec2( 0.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, -1.0f, 1.0f ), glm::vec3( 0.0f, 0.0f, 1.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + // back face + { Vertex( 
glm::vec3( 1.0f, -1.0f, -1.0f ), glm::vec3( 0.0f, 0.0f, -1.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, -1.0f, -1.0f ), glm::vec3( 0.0f, 0.0f, -1.0f ), glm::vec2( 1.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, -1.0f ), glm::vec3( 0.0f, 0.0f, -1.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, -1.0f ), glm::vec3( 0.0f, 0.0f, -1.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, -1.0f ), glm::vec3( 0.0f, 0.0f, -1.0f ), glm::vec2( 0.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, -1.0f ), glm::vec3( 0.0f, 0.0f, -1.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + // left face + { Vertex( glm::vec3( -1.0f, -1.0f, -1.0f ), glm::vec3( -1.0f, 0.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, -1.0f, 1.0f ), glm::vec3( -1.0f, 0.0f, 0.0f ), glm::vec2( 1.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, 1.0f ), glm::vec3( -1.0f, 0.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, 1.0f ), glm::vec3( -1.0f, 0.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, -1.0f ), glm::vec3( -1.0f, 0.0f, 0.0f ), glm::vec2( 0.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, -1.0f, -1.0f ), glm::vec3( -1.0f, 0.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + // right face + { Vertex( glm::vec3( 1.0f, -1.0f, 1.0f ), glm::vec3( 1.0f, 0.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, -1.0f ), glm::vec3( 1.0f, 0.0f, 0.0f ), glm::vec2( 1.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, -1.0f ), glm::vec3( 1.0f, 0.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, -1.0f ), glm::vec3( 1.0f, 0.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, 1.0f ), glm::vec3( 1.0f, 0.0f, 0.0f ), glm::vec2( 0.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, 1.0f ), glm::vec3( 1.0f, 0.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + // top face + { Vertex( 
glm::vec3( -1.0f, 1.0f, 1.0f ), glm::vec3( 0.0f, 1.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, 1.0f ), glm::vec3( 0.0f, 1.0f, 0.0f ), glm::vec2( 1.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, -1.0f ), glm::vec3( 0.0f, 1.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, 1.0f, -1.0f ), glm::vec3( 0.0f, 1.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, -1.0f ), glm::vec3( 0.0f, 1.0f, 0.0f ), glm::vec2( 0.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, 1.0f, 1.0f ), glm::vec3( 0.0f, 1.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + // bottom face + { Vertex( glm::vec3( -1.0f, -1.0f, -1.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, -1.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ), glm::vec2( 1.0f, 0.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, 1.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( 1.0f, -1.0f, 1.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ), glm::vec2( 1.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, -1.0f, 1.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ), glm::vec2( 0.0f, 1.0f ), 0 ) }, + { Vertex( glm::vec3( -1.0f, -1.0f, -1.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ), glm::vec2( 0.0f, 0.0f ), 0 ) }, +}; + +static std::string vertexShaderText = R"( +#version 450 + +#extension GL_ARB_separate_shader_objects : enable + +layout(binding = 0) uniform UniformBufferObject +{ + mat4 model; + mat4 view; + mat4 proj; + mat4 modelIT; +} ubo; + +layout(location = 0) in vec3 inPosition; +layout(location = 1) in vec3 inNormal; +layout(location = 2) in vec2 inTexCoord; +layout(location = 3) in int inMatID; + +layout(location = 0) flat out int outMatID; +layout(location = 1) out vec2 outTexCoord; +layout(location = 2) out vec3 outNormal; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = ubo.proj * ubo.view * ubo.model * vec4(inPosition, 1.0); + outMatID = inMatID; + 
outTexCoord = inTexCoord; + outNormal = vec3(ubo.modelIT * vec4(inNormal, 0.0)); +} +)"; + +static std::string fragmentShaderText = R"( +#version 450 + +#extension GL_ARB_separate_shader_objects : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(location = 0) flat in int matIndex; +layout(location = 1) in vec2 texCoord; +layout(location = 2) in vec3 normal; + +struct Material +{ + vec3 diffuse; + int textureID; +}; +const int sizeofMat = 1; + +layout(binding = 1) buffer MaterialBufferObject { vec4[] m; } materials; +layout(binding = 2) uniform sampler2D[] textureSamplers; + +Material unpackMaterial() +{ + Material m; + vec4 d0 = materials.m[sizeofMat * matIndex + 0]; + + m.diffuse = d0.xyz; + m.textureID = floatBitsToInt(d0.w); + + return m; +} + +layout(location = 0) out vec4 outColor; + +void main() +{ + vec3 lightVector = normalize(vec3(5, 4, 3)); + + float dot_product = max(dot(lightVector, normalize(normal)), 0.2); + + Material m = unpackMaterial(); + vec3 c = m.diffuse; + if (0 <= m.textureID) + { + c *= texture(textureSamplers[m.textureID], texCoord).xyz; + } + c *= dot_product; + + outColor = vec4(c, 1); +} +)"; + +static std::string raygenShaderText = R"( +#version 460 + +#extension GL_NV_ray_tracing : require + +layout(binding = 0, set = 0) uniform accelerationStructureNV topLevelAS; +layout(binding = 1, set = 0, rgba8) uniform image2D image; + +layout(binding=2, set = 0) uniform UniformBufferObject +{ + mat4 model; + mat4 view; + mat4 proj; + mat4 modelIT; + mat4 viewInverse; + mat4 projInverse; +} cam; + +layout(location = 0) rayPayloadNV vec3 hitValue; + +void main() +{ + const vec2 pixelCenter = vec2(gl_LaunchIDNV.xy) + vec2(0.5); + const vec2 inUV = pixelCenter/vec2(gl_LaunchSizeNV.xy); + vec2 d = inUV * 2.0 - 1.0; + + vec4 origin = cam.viewInverse*vec4(0,0,0,1); + vec4 target = cam.projInverse * vec4(d.x, d.y, 1, 1) ; + vec4 direction = cam.viewInverse*vec4(normalize(target.xyz), 0) ; + + uint rayFlags = gl_RayFlagsOpaqueNV; + uint 
cullMask = 0xff; + float tmin = 0.001; + float tmax = 10000.0; + + traceNV(topLevelAS, rayFlags, cullMask, 0 /*sbtRecordOffset*/, 0 /*sbtRecordStride*/, 0 /*missIndex*/, origin.xyz, tmin, direction.xyz, tmax, 0 /*payload*/); + imageStore(image, ivec2(gl_LaunchIDNV.xy), vec4(hitValue, 0.0)); +} +)"; + +static std::string missShaderText = R"( +#version 460 + +#extension GL_NV_ray_tracing : require + +layout(location = 0) rayPayloadInNV vec3 hitValue; + +void main() +{ + hitValue = vec3(0.0, 0.1, 0.3); +} +)"; + +static std::string shadowMissShaderText = R"( +#version 460 + +#extension GL_NV_ray_tracing : require + +layout(location = 2) rayPayloadInNV bool isShadowed; + +void main() +{ + isShadowed = false; +})"; + +static std::string closestHitShaderText = R"( +#version 460 + +#extension GL_NV_ray_tracing : require +#extension GL_EXT_nonuniform_qualifier : enable + +layout(location = 0) rayPayloadInNV vec3 hitValue; +layout(location = 2) rayPayloadNV bool isShadowed; + +hitAttributeNV vec3 attribs; +layout(binding = 0, set = 0) uniform accelerationStructureNV topLevelAS; + +layout(binding = 3, set = 0) buffer Vertices { vec4 v[]; } vertices; +layout(binding = 4, set = 0) buffer Indices { uint i[]; } indices; + +layout(binding = 5, set = 0) buffer MatColorBufferObject { vec4[] m; } materials; +layout(binding = 6, set = 0) uniform sampler2D[] textureSamplers; + +struct Vertex +{ + vec3 pos; + vec3 nrm; + vec2 texCoord; + int matIndex; +}; +// Number of vec4 values used to represent a vertex +uint vertexSize = 3; + +Vertex unpackVertex(uint index) +{ + Vertex v; + + vec4 d0 = vertices.v[vertexSize * index + 0]; + vec4 d1 = vertices.v[vertexSize * index + 1]; + vec4 d2 = vertices.v[vertexSize * index + 2]; + + v.pos = d0.xyz; + v.nrm = vec3(d0.w, d1.xy); + v.texCoord = d1.zw; + v.matIndex = floatBitsToInt(d2.x); + return v; +} + +struct Material +{ + vec3 diffuse; + int textureID; +}; +// Number of vec4 values used to represent a material +const int sizeofMat = 1; + 
+Material unpackMaterial(int matIndex) +{ + Material m; + vec4 d0 = materials.m[sizeofMat * matIndex + 0]; + + m.diffuse = d0.xyz; + m.textureID = floatBitsToInt(d0.w); + return m; +} + +void main() +{ + ivec3 ind = ivec3(indices.i[3 * gl_PrimitiveID], indices.i[3 * gl_PrimitiveID + 1], indices.i[3 * gl_PrimitiveID + 2]); + + Vertex v0 = unpackVertex(ind.x); + Vertex v1 = unpackVertex(ind.y); + Vertex v2 = unpackVertex(ind.z); + + const vec3 barycentrics = vec3(1.0 - attribs.x - attribs.y, attribs.x, attribs.y); + + vec3 normal = normalize(v0.nrm * barycentrics.x + v1.nrm * barycentrics.y + v2.nrm * barycentrics.z); + + vec3 lightVector = normalize(vec3(5, 4, 3)); + + float dot_product = max(dot(lightVector, normal), 0.2); + + Material mat = unpackMaterial(v1.matIndex); + + vec3 c = dot_product * mat.diffuse; + if (0 <= mat.textureID) + { + vec2 texCoord = v0.texCoord * barycentrics.x + v1.texCoord * barycentrics.y + v2.texCoord * barycentrics.z; + c *= texture(textureSamplers[mat.textureID], texCoord).xyz; + } + float tmin = 0.001; + float tmax = 100.0; + vec3 origin = gl_WorldRayOriginNV + gl_WorldRayDirectionNV * gl_HitTNV; + isShadowed = true; + traceNV(topLevelAS, gl_RayFlagsTerminateOnFirstHitNV|gl_RayFlagsOpaqueNV|gl_RayFlagsSkipClosestHitShaderNV, 0xFF, 1 /* sbtRecordOffset */, 0 /* sbtRecordStride */, 1 /* missIndex */, origin, + tmin, lightVector, tmax, 2 /*payload location*/); + hitValue = c; + if (isShadowed) + { + hitValue *= 0.3f; + } +} +)"; + +#ifndef IMGUI_VK_QUEUED_FRAMES +# define IMGUI_VK_QUEUED_FRAMES 2 +#endif // !IMGUI_VK_QUEUED_FRAMES + +struct AppInfo +{ + vk::su::CameraManipulator cameraManipulator; + bool useRasterRender = false; +}; + +static void check_vk_result( VkResult err ) +{ + if ( err != 0 ) + { + std::cerr << AppName << ": Vulkan error " << vk::to_string( static_cast( err ) ); + if ( err < 0 ) + { + abort(); + } + } +} + +static void cursorPosCallback( GLFWwindow * window, double mouseX, double mouseY ) +{ + 
vk::su::CameraManipulator::MouseButton mouseButton = + ( glfwGetMouseButton( window, GLFW_MOUSE_BUTTON_LEFT ) == GLFW_PRESS ) + ? vk::su::CameraManipulator::MouseButton::Left + : ( glfwGetMouseButton( window, GLFW_MOUSE_BUTTON_MIDDLE ) == GLFW_PRESS ) + ? vk::su::CameraManipulator::MouseButton::Middle + : ( glfwGetMouseButton( window, GLFW_MOUSE_BUTTON_RIGHT ) == GLFW_PRESS ) + ? vk::su::CameraManipulator::MouseButton::Right + : vk::su::CameraManipulator::MouseButton::None; + if ( mouseButton != vk::su::CameraManipulator::MouseButton::None ) + { + vk::su::CameraManipulator::ModifierFlags modifiers; + if ( glfwGetKey( window, GLFW_KEY_LEFT_ALT ) == GLFW_PRESS ) + { + modifiers |= vk::su::CameraManipulator::ModifierFlagBits::Alt; + } + if ( glfwGetKey( window, GLFW_KEY_LEFT_CONTROL ) == GLFW_PRESS ) + { + modifiers |= vk::su::CameraManipulator::ModifierFlagBits::Ctrl; + } + if ( glfwGetKey( window, GLFW_KEY_LEFT_SHIFT ) == GLFW_PRESS ) + { + modifiers |= vk::su::CameraManipulator::ModifierFlagBits::Shift; + } + + vk::su::CameraManipulator & cameraManipulator = + reinterpret_cast( glfwGetWindowUserPointer( window ) )->cameraManipulator; + cameraManipulator.mouseMove( + glm::ivec2( static_cast( mouseX ), static_cast( mouseY ) ), mouseButton, modifiers ); + } +} + +static void errorCallback( int error, const char * description ) +{ + fprintf( stderr, "GLFW Error %d: %s\n", error, description ); +} + +static void framebufferSizeCallback( GLFWwindow * window, int w, int h ) +{ + vk::su::CameraManipulator & cameraManipulator = + reinterpret_cast( glfwGetWindowUserPointer( window ) )->cameraManipulator; + cameraManipulator.setWindowSize( glm::ivec2( w, h ) ); +} + +static void keyCallback( GLFWwindow * window, int key, int /*scancode*/, int action, int /*mods*/ ) +{ + if ( action == GLFW_PRESS ) + { + switch ( key ) + { + case GLFW_KEY_ESCAPE: + case 'Q': glfwSetWindowShouldClose( window, 1 ); break; + case 'R': + { + AppInfo * appInfo = reinterpret_cast( 
glfwGetWindowUserPointer( window ) ); + appInfo->useRasterRender = !appInfo->useRasterRender; + } + break; + } + } +} + +static void mouseButtonCallback( GLFWwindow * window, int /*button*/, int /*action*/, int /*mods*/ ) +{ + double xpos, ypos; + glfwGetCursorPos( window, &xpos, &ypos ); + + vk::su::CameraManipulator & cameraManipulator = + reinterpret_cast( glfwGetWindowUserPointer( window ) )->cameraManipulator; + cameraManipulator.setMousePosition( glm::ivec2( static_cast( xpos ), static_cast( ypos ) ) ); +} + +static void scrollCallback( GLFWwindow * window, double /*xoffset*/, double yoffset ) +{ + vk::su::CameraManipulator & cameraManipulator = + reinterpret_cast( glfwGetWindowUserPointer( window ) )->cameraManipulator; + cameraManipulator.wheel( static_cast( yoffset ) ); +} + +// random data and functions +static std::random_device randomDevice; +static std::mt19937 randomGenerator( randomDevice() ); + +template +T random( T minValue = std::numeric_limits::min(), T maxValue = std::numeric_limits::max() ) +{ + static_assert( std::numeric_limits::is_integer, "Type T needs to be an integral type!\n" ); + std::uniform_int_distribution<> randomDistribution( minValue, maxValue ); + + return static_cast( randomDistribution( randomGenerator ) ); +} + +glm::vec3 randomVec3( float minValue, float maxValue ) +{ + std::uniform_real_distribution randomDistribution( minValue, maxValue ); + + return glm::vec3( randomDistribution( randomGenerator ), + randomDistribution( randomGenerator ), + randomDistribution( randomGenerator ) ); +} + +uint32_t roundUp( uint32_t value, uint32_t alignment ) +{ + return ( ( value + alignment - 1 ) / alignment ) * alignment; +} + +int main( int /*argc*/, char ** /*argv*/ ) +{ + // number of cubes in x-, y-, and z-direction + const size_t xMax = 10; + const size_t yMax = 10; + const size_t zMax = 10; + + AppInfo appInfo; + + try + { + // Setup glfw + glfwSetErrorCallback( errorCallback ); + if ( !glfwInit() ) + { + std::cerr << AppName << ": 
can't initialize glfw!\n"; + return 1; + } + if ( !glfwVulkanSupported() ) + { + std::cerr << AppName << ": Vulkan not supported!\n"; + return 1; + } + + // create a window using glfw + glfwWindowHint( GLFW_CLIENT_API, GLFW_NO_API ); + vk::Extent2D windowExtent( 1280, 720 ); + GLFWwindow * window = glfwCreateWindow( windowExtent.width, windowExtent.height, AppName, nullptr, nullptr ); + + // install some callbacks + glfwSetCursorPosCallback( window, cursorPosCallback ); + glfwSetFramebufferSizeCallback( window, framebufferSizeCallback ); + glfwSetKeyCallback( window, keyCallback ); + glfwSetMouseButtonCallback( window, mouseButtonCallback ); + glfwSetScrollCallback( window, scrollCallback ); + + // Setup camera and make it available as the userPointer in the glfw window + appInfo.cameraManipulator.setWindowSize( glm::u32vec2( windowExtent.width, windowExtent.height ) ); + glm::vec3 diagonal = + 3.0f * glm::vec3( static_cast( xMax ), static_cast( yMax ), static_cast( zMax ) ); + appInfo.cameraManipulator.setLookat( 1.5f * diagonal, 0.5f * diagonal, glm::vec3( 0, 1, 0 ) ); + glfwSetWindowUserPointer( window, &appInfo ); + + // Create Vulkan Instance with needed extensions + uint32_t glfwExtensionsCount; + const char ** glfwExtensions = glfwGetRequiredInstanceExtensions( &glfwExtensionsCount ); + std::vector instanceExtensions; + instanceExtensions.reserve( glfwExtensionsCount + 1 ); + for ( uint32_t i = 0; i < glfwExtensionsCount; i++ ) + { + instanceExtensions.push_back( glfwExtensions[i] ); + } + instanceExtensions.push_back( VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ); + + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, instanceExtensions ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = 
vk::raii::su::makeUniquePhysicalDevice( *instance ); + + // Create Window Surface (using glfw) + VkSurfaceKHR glfwSurface; + VkResult err = glfwCreateWindowSurface( static_cast( **instance ), window, nullptr, &glfwSurface ); + check_vk_result( err ); + std::unique_ptr surface = vk::raii::su::make_unique( *instance, glfwSurface ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surface ); + + // Create a Device with ray tracing support (besides some other extensions needed) and needed features + auto supportedFeatures = + physicalDevice->getFeatures2(); + std::unique_ptr device = + vk::raii::su::makeUniqueDevice( *physicalDevice, + graphicsAndPresentQueueFamilyIndex.first, + { VK_KHR_SWAPCHAIN_EXTENSION_NAME, + VK_NV_RAY_TRACING_EXTENSION_NAME, + VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME }, + &supportedFeatures.get().features, + &supportedFeatures.get() ); + + // setup stuff per frame + std::array perFrameData; + for ( int i = 0; i < IMGUI_VK_QUEUED_FRAMES; i++ ) + { + perFrameData[i].commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + perFrameData[i].commandBuffer = vk::raii::su::makeUniqueCommandBuffer( *device, *perFrameData[i].commandPool ); + perFrameData[i].fence = + vk::raii::su::make_unique( *device, vk::FenceCreateInfo( vk::FenceCreateFlagBits::eSignaled ) ); + perFrameData[i].presentCompleteSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + perFrameData[i].renderCompleteSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + } + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + // create a descriptor pool with a number of available descriptors + std::vector poolSizes = { + { 
vk::DescriptorType::eCombinedImageSampler, 1000 }, + { vk::DescriptorType::eUniformBuffer, 1000 }, + { vk::DescriptorType::eStorageBuffer, 1000 }, + }; + std::unique_ptr descriptorPool = + vk::raii::su::makeUniqueDescriptorPool( *device, poolSizes ); + + // setup swap chain, render pass, depth buffer and the frame buffers + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surface, + windowExtent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eStorage, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surface ) ); + vk::Format depthFormat = vk::raii::su::pickDepthFormat( *physicalDevice ); + + // setup a render pass + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, surfaceFormat.format, depthFormat ); + + vk::raii::su::DepthBufferData depthBufferData( *physicalDevice, *device, depthFormat, windowExtent ); + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, windowExtent ); + + bool samplerAnisotropy = !!supportedFeatures.get().features.samplerAnisotropy; + + // create some simple checkerboard textures, randomly sized and colored + const size_t textureCount = 10; + std::vector textures; + textures.reserve( textureCount ); + for ( size_t i = 0; i < textureCount; i++ ) + { + textures.emplace_back( *physicalDevice, + *device, + vk::Extent2D( random( 2, 8 ) * 16, random( 2, 8 ) * 16 ), + vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, + vk::FormatFeatureFlags(), + samplerAnisotropy, + true ); + } + vk::raii::su::oneTimeSubmit( + *device, *perFrameData[0].commandPool, *graphicsQueue, [&]( vk::raii::CommandBuffer const & commandBuffer ) { + for ( auto & t : textures ) + { + t.setImage( + commandBuffer, + 
vk::su::CheckerboardImageGenerator( { random(), random(), random() }, + { random(), random(), random() } ) ); + } + } ); + + // create some materials with a random diffuse color, referencing one of the above textures + const size_t materialCount = 10; + assert( materialCount == textureCount ); + std::vector materials( materialCount ); + for ( size_t i = 0; i < materialCount; i++ ) + { + materials[i].diffuse = randomVec3( 0.0f, 1.0f ); + materials[i].textureID = vk::su::checked_cast( i ); + } + vk::raii::su::BufferData materialBufferData( + *physicalDevice, *device, materialCount * MaterialStride, vk::BufferUsageFlagBits::eStorageBuffer ); + materialBufferData.upload( materials, MaterialStride ); + + // create a a 3D-array of cubes, randomly jittered, using a random material + std::vector vertices; + vertices.reserve( xMax * yMax * zMax * cubeData.size() ); + for ( size_t x = 0; x < xMax; x++ ) + { + for ( size_t y = 0; y < yMax; y++ ) + { + for ( size_t z = 0; z < zMax; z++ ) + { + int m = random( 0, materialCount - 1 ); + glm::vec3 jitter = randomVec3( 0.0f, 0.6f ); + for ( auto const & v : cubeData ) + { + vertices.push_back( v ); + vertices.back().pos += + 3.0f * glm::vec3( static_cast( x ), static_cast( y ), static_cast( z ) ) + jitter; + vertices.back().matID = static_cast( m ); + } + } + } + } + + // create an 1-1 index buffer + std::vector indices( vertices.size() ); + std::iota( indices.begin(), indices.end(), 0 ); + + // there's just one vertex- and one index-buffer, but with more complex scene loaders there might be more! 
+ vk::BufferUsageFlags bufferUsageFlags = vk::BufferUsageFlagBits::eTransferDst | + vk::BufferUsageFlagBits::eVertexBuffer | + vk::BufferUsageFlagBits::eStorageBuffer; + vk::raii::su::BufferData vertexBufferData( *physicalDevice, + *device, + vertices.size() * VertexStride, + bufferUsageFlags, + vk::MemoryPropertyFlagBits::eDeviceLocal ); + vertexBufferData.upload( + *physicalDevice, *device, *perFrameData[0].commandPool, *graphicsQueue, vertices, VertexStride ); + + bufferUsageFlags = vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eIndexBuffer | + vk::BufferUsageFlagBits::eStorageBuffer; + vk::raii::su::BufferData indexBufferData( *physicalDevice, + *device, + indices.size() * sizeof( uint32_t ), + bufferUsageFlags, + vk::MemoryPropertyFlagBits::eDeviceLocal ); + indexBufferData.upload( + *physicalDevice, *device, *perFrameData[0].commandPool, *graphicsQueue, indices, sizeof( uint32_t ) ); + + // clang-format off + glm::mat4x4 transform( glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f ) ); + // clang-format on + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eStorageBuffer, + 1, + vk::ShaderStageFlagBits::eVertex | vk::ShaderStageFlagBits::eFragment }, + { vk::DescriptorType::eCombinedImageSampler, + static_cast( textures.size() ), + vk::ShaderStageFlagBits::eFragment } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); + glslang::FinalizeProcess(); 
+ + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + VertexStride, + { { vk::Format::eR32G32B32Sfloat, vk::su::checked_cast( offsetof( Vertex, pos ) ) }, + { vk::Format::eR32G32B32Sfloat, vk::su::checked_cast( offsetof( Vertex, nrm ) ) }, + { vk::Format::eR32G32Sfloat, vk::su::checked_cast( offsetof( Vertex, texCoord ) ) }, + { vk::Format::eR32Sint, vk::su::checked_cast( offsetof( Vertex, matID ) ) } }, + vk::FrontFace::eCounterClockwise, + true, + *pipelineLayout, + *renderPass ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( UniformBufferObject ), vk::BufferUsageFlagBits::eUniformBuffer ); + + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + vk::raii::su::updateDescriptorSets( *device, + *descriptorSet, + { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, {} }, + { vk::DescriptorType::eStorageBuffer, *materialBufferData.buffer, {} } }, + textures ); + + // RayTracing specific stuff + + // create acceleration structures: one top-level, and just one bottom-level + AccelerationStructureData topLevelAS, bottomLevelAS; + vk::raii::su::oneTimeSubmit( + *device, *perFrameData[0].commandPool, *graphicsQueue, [&]( vk::raii::CommandBuffer const & commandBuffer ) { + vk::GeometryDataNV geometryDataNV( vk::GeometryTrianglesNV( **vertexBufferData.buffer, + 0, + vk::su::checked_cast( vertices.size() ), + VertexStride, + vk::Format::eR32G32B32Sfloat, + **indexBufferData.buffer, + 0, + vk::su::checked_cast( indices.size() ), + vk::IndexType::eUint32 ), + {} ); + bottomLevelAS = + createAccelerationStructureData( *physicalDevice, + *device, + commandBuffer, + {}, + { vk::GeometryNV( vk::GeometryTypeNV::eTriangles, 
geometryDataNV ) } ); + + topLevelAS = + createAccelerationStructureData( *physicalDevice, + *device, + commandBuffer, + { std::make_pair( bottomLevelAS.acclerationStructure, transform ) }, + std::vector() ); + } ); + + // create raytracing descriptor set + vk::raii::su::oneTimeSubmit( + *device, *perFrameData[0].commandPool, *graphicsQueue, [&]( vk::raii::CommandBuffer const & commandBuffer ) { + vk::BufferMemoryBarrier bufferMemoryBarrier( {}, + vk::AccessFlagBits::eShaderRead, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + **vertexBufferData.buffer, + 0, + VK_WHOLE_SIZE ); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eAllCommands, + vk::PipelineStageFlagBits::eAllCommands, + {}, + nullptr, + bufferMemoryBarrier, + nullptr ); + + bufferMemoryBarrier.buffer = **indexBufferData.buffer; + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eAllCommands, + vk::PipelineStageFlagBits::eAllCommands, + {}, + nullptr, + bufferMemoryBarrier, + nullptr ); + } ); + + std::vector bindings; + bindings.emplace_back( 0, + vk::DescriptorType::eAccelerationStructureNV, + 1, + vk::ShaderStageFlagBits::eRaygenNV | vk::ShaderStageFlagBits::eClosestHitNV ); + bindings.emplace_back( + 1, vk::DescriptorType::eStorageImage, 1, vk::ShaderStageFlagBits::eRaygenNV ); // raytracing output + bindings.emplace_back( + 2, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eRaygenNV ); // camera information + bindings.emplace_back( + 3, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ); // vertex buffer + bindings.emplace_back( + 4, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ); // index buffer + bindings.emplace_back( + 5, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ); // material buffer + bindings.emplace_back( 6, + vk::DescriptorType::eCombinedImageSampler, + vk::su::checked_cast( textures.size() ), + vk::ShaderStageFlagBits::eClosestHitNV ); // textures 
+ + std::vector descriptorPoolSizes; + descriptorPoolSizes.reserve( bindings.size() ); + for ( const auto & b : bindings ) + { + descriptorPoolSizes.emplace_back( + b.descriptorType, vk::su::checked_cast( swapChainData.images.size() ) * b.descriptorCount ); + } + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, + vk::su::checked_cast( swapChainData.images.size() ), + descriptorPoolSizes ); + std::unique_ptr rayTracingDescriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( {}, bindings ); + std::unique_ptr rayTracingDescriptorSetLayout = + vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + std::vector layouts; + for ( size_t i = 0; i < swapChainData.images.size(); i++ ) + { + layouts.push_back( **rayTracingDescriptorSetLayout ); + } + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **rayTracingDescriptorPool, layouts ); + vk::raii::DescriptorSets rayTracingDescriptorSets( *device, descriptorSetAllocateInfo ); + + // Bind ray tracing specific descriptor sets into pNext of a vk::WriteDescriptorSet + vk::WriteDescriptorSetAccelerationStructureNV writeDescriptorSetAcceleration( 1, + &**topLevelAS.acclerationStructure ); + std::vector accelerationDescriptionSets; + for ( size_t i = 0; i < rayTracingDescriptorSets.size(); i++ ) + { + accelerationDescriptionSets.emplace_back( *rayTracingDescriptorSets[i], 0, 0, 1, bindings[0].descriptorType ); + accelerationDescriptionSets.back().pNext = &writeDescriptorSetAcceleration; + } + device->updateDescriptorSets( accelerationDescriptionSets, nullptr ); + + // Bind all the other buffers and images, starting with dstBinding == 2 (dstBinding == 1 is used by the backBuffer + // view) + for ( size_t i = 0; i < rayTracingDescriptorSets.size(); i++ ) + { + vk::raii::su::updateDescriptorSets( *device, + rayTracingDescriptorSets[i], + { { 
bindings[2].descriptorType, *uniformBufferData.buffer, {} }, + { bindings[3].descriptorType, *vertexBufferData.buffer, {} }, + { bindings[4].descriptorType, *indexBufferData.buffer, {} }, + { bindings[5].descriptorType, *materialBufferData.buffer, {} } }, + textures, + 2 ); + } + + // create the ray-tracing shader modules + glslang::InitializeProcess(); + std::unique_ptr raygenShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eRaygenNV, raygenShaderText ); + std::unique_ptr missShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eMissNV, missShaderText ); + std::unique_ptr shadowMissShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eMissNV, shadowMissShaderText ); + std::unique_ptr closestHitShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eClosestHitNV, closestHitShaderText ); + glslang::FinalizeProcess(); + + // create the ray tracing pipeline + std::vector shaderStages; + std::vector shaderGroups; + + // We use only one ray generation, that will implement the camera model + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eRaygenNV, **raygenShaderModule, "main" ); + shaderGroups.emplace_back( + vk::RayTracingShaderGroupTypeNV::eGeneral, 0, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ); + + // The first miss shader is used to look-up the environment in case the rays from the camera miss the geometry + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eMissNV, **missShaderModule, "main" ); + shaderGroups.emplace_back( + vk::RayTracingShaderGroupTypeNV::eGeneral, 1, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ); + + // The second miss shader is invoked when a shadow ray misses the geometry. 
It simply indicates that no occlusion + // has been found + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eMissNV, **shadowMissShaderModule, "main" ); + shaderGroups.emplace_back( + vk::RayTracingShaderGroupTypeNV::eGeneral, 2, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ); + + // The first hit group defines the shaders invoked when a ray shot from the camera hit the geometry. In this case we + // only specify the closest hit shader, and rely on the build-in triangle intersection and pass-through any-hit + // shader. However, explicit intersection and any hit shaders could be added as well. + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eClosestHitNV, **closestHitShaderModule, "main" ); + shaderGroups.emplace_back( vk::RayTracingShaderGroupTypeNV::eTrianglesHitGroup, + VK_SHADER_UNUSED_NV, + 3, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV ); + + // The second hit group defines the shaders invoked when a shadow ray hits the geometry. For simple shadows we do + // not need any shader in that group: we will rely on initializing the payload and update it only in the miss shader + shaderGroups.emplace_back( vk::RayTracingShaderGroupTypeNV::eTrianglesHitGroup, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV ); + + // Create the layout of the pipeline following the provided descriptor set layout + std::unique_ptr rayTracingPipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *rayTracingDescriptorSetLayout ); + + // Assemble the shader stages and recursion depth info into the raytracing pipeline + // The ray tracing process can shoot rays from the camera, and a shadow ray can be shot from the + // hit points of the camera rays, hence a recursion level of 2. This number should be kept as low + // as possible for performance reasons. 
Even recursive ray tracing should be flattened into a loop + // in the ray generation to avoid deep recursion. + uint32_t maxRecursionDepth = 2; + vk::RayTracingPipelineCreateInfoNV rayTracingPipelineCreateInfo( + {}, shaderStages, shaderGroups, maxRecursionDepth, **rayTracingPipelineLayout ); + std::unique_ptr rayTracingPipeline = + vk::raii::su::make_unique( *device, nullptr, rayTracingPipelineCreateInfo ); + switch ( rayTracingPipeline->getConstructorSuccessCode() ) + { + case vk::Result::eSuccess: break; + case vk::Result::ePipelineCompileRequiredEXT: + // something meaningfull here + break; + default: assert( false ); // should never happen + } + + vk::StructureChain propertiesChain = + physicalDevice->getProperties2(); + uint32_t shaderGroupBaseAlignment = + propertiesChain.get().shaderGroupBaseAlignment; + uint32_t shaderGroupHandleSize = + propertiesChain.get().shaderGroupHandleSize; + + uint32_t raygenShaderBindingOffset = 0; // starting with raygen + uint32_t raygenShaderTableSize = shaderGroupHandleSize; // one raygen shader + uint32_t missShaderBindingOffset = + raygenShaderBindingOffset + roundUp( raygenShaderTableSize, shaderGroupBaseAlignment ); + uint32_t missShaderBindingStride = shaderGroupHandleSize; + uint32_t missShaderTableSize = 2 * missShaderBindingStride; // two raygen shaders + uint32_t hitShaderBindingOffset = + missShaderBindingOffset + roundUp( missShaderTableSize, shaderGroupBaseAlignment ); + uint32_t hitShaderBindingStride = shaderGroupHandleSize; + uint32_t hitShaderTableSize = 2 * hitShaderBindingStride; // two hit shaders + + uint32_t shaderBindingTableSize = hitShaderBindingOffset + hitShaderTableSize; + std::vector shaderHandleStorage( shaderBindingTableSize ); + memcpy( &shaderHandleStorage[raygenShaderBindingOffset], + rayTracingPipeline->getRayTracingShaderGroupHandlesKHR( 0, 1, raygenShaderTableSize ).data(), + raygenShaderTableSize ); + memcpy( &shaderHandleStorage[missShaderBindingOffset], + 
rayTracingPipeline->getRayTracingShaderGroupHandlesKHR( 1, 2, missShaderTableSize ).data(), + missShaderTableSize ); + memcpy( &shaderHandleStorage[hitShaderBindingOffset], + rayTracingPipeline->getRayTracingShaderGroupHandlesKHR( 3, 2, hitShaderTableSize ).data(), + hitShaderTableSize ); + assert( shaderHandleStorage.size() == shaderBindingTableSize ); + + vk::raii::su::BufferData shaderBindingTableBufferData( *physicalDevice, + *device, + shaderBindingTableSize, + vk::BufferUsageFlagBits::eTransferDst, + vk::MemoryPropertyFlagBits::eHostVisible ); + shaderBindingTableBufferData.upload( shaderHandleStorage ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + // Main loop + uint32_t frameIndex = 0; + UniformBufferObject uniformBufferObject; + uniformBufferObject.model = glm::mat4( 1 ); + uniformBufferObject.modelIT = glm::inverseTranspose( uniformBufferObject.model ); + + double accumulatedTime{ 0.0 }; + size_t frameCount{ 0 }; + while ( !glfwWindowShouldClose( window ) ) + { + double startTime = glfwGetTime(); + glfwPollEvents(); + + vk::raii::CommandBuffer const & commandBuffer = *perFrameData[frameIndex].commandBuffer; + + int w, h; + glfwGetWindowSize( window, &w, &h ); + if ( ( w != static_cast( windowExtent.width ) ) || ( h != static_cast( windowExtent.height ) ) ) + { + windowExtent.width = w; + windowExtent.height = h; + device->waitIdle(); + swapChainData = + vk::raii::su::SwapChainData( *physicalDevice, + *device, + *surface, + windowExtent, + vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eStorage, + swapChainData.swapChain, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + depthBufferData = vk::raii::su::DepthBufferData( + *physicalDevice, *device, vk::su::pickDepthFormat( **physicalDevice ), windowExtent ); + + vk::raii::su::oneTimeSubmit( + 
commandBuffer, *graphicsQueue, [&]( vk::raii::CommandBuffer const & commandBuffer ) { + vk::raii::su::setImageLayout( commandBuffer, + **depthBufferData.image, + depthFormat, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eDepthStencilAttachmentOptimal ); + } ); + + framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, windowExtent ); + } + + // update the uniformBufferObject + assert( 0 < windowExtent.height ); + uniformBufferObject.view = appInfo.cameraManipulator.getMatrix(); + uniformBufferObject.proj = glm::perspective( + glm::radians( 65.0f ), windowExtent.width / static_cast( windowExtent.height ), 0.1f, 1000.0f ); + uniformBufferObject.proj[1][1] *= -1; // Inverting Y for Vulkan + uniformBufferObject.viewInverse = glm::inverse( uniformBufferObject.view ); + uniformBufferObject.projInverse = glm::inverse( uniformBufferObject.proj ); + uniformBufferData.upload( uniformBufferObject ); + + // frame begin + vk::Result result; + uint32_t backBufferIndex; + std::tie( result, backBufferIndex ) = swapChainData.swapChain->acquireNextImage( + vk::su::FenceTimeout, **perFrameData[frameIndex].presentCompleteSemaphore ); + assert( result == vk::Result::eSuccess ); + + while ( vk::Result::eTimeout == + device->waitForFences( { **perFrameData[frameIndex].fence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + device->resetFences( { **perFrameData[frameIndex].fence } ); + + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); + + if ( appInfo.useRasterRender ) + { + vk::RenderPassBeginInfo renderPassBeginInfo( **renderPass, + **framebuffers[backBufferIndex], + vk::Rect2D( vk::Offset2D( 0, 0 ), windowExtent ), + clearValues ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer.bindDescriptorSets( + 
vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( windowExtent.width ), + static_cast( windowExtent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), windowExtent ) ); + + commandBuffer.bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindIndexBuffer( **indexBufferData.buffer, 0, vk::IndexType::eUint32 ); + commandBuffer.drawIndexed( vk::su::checked_cast( indices.size() ), 1, 0, 0, 0 ); + + commandBuffer.endRenderPass(); + } + else + { + vk::DescriptorImageInfo imageInfo( + nullptr, *swapChainData.imageViews[backBufferIndex], vk::ImageLayout::eGeneral ); + vk::WriteDescriptorSet writeDescriptorSet( + *rayTracingDescriptorSets[backBufferIndex], 1, 0, bindings[1].descriptorType, imageInfo ); + device->updateDescriptorSets( writeDescriptorSet, nullptr ); + + vk::raii::su::setImageLayout( commandBuffer, + static_cast( swapChainData.images[backBufferIndex] ), + surfaceFormat.format, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eGeneral ); + + commandBuffer.bindPipeline( vk::PipelineBindPoint::eRayTracingNV, **rayTracingPipeline ); + + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eRayTracingNV, + **rayTracingPipelineLayout, + 0, + { *rayTracingDescriptorSets[backBufferIndex] }, + nullptr ); + + commandBuffer.traceRaysNV( **shaderBindingTableBufferData.buffer, + raygenShaderBindingOffset, + **shaderBindingTableBufferData.buffer, + missShaderBindingOffset, + missShaderBindingStride, + **shaderBindingTableBufferData.buffer, + hitShaderBindingOffset, + hitShaderBindingStride, + nullptr, + 0, + 0, + windowExtent.width, + windowExtent.height, + 1 ); + + vk::raii::su::setImageLayout( commandBuffer, + static_cast( swapChainData.images[backBufferIndex] ), + surfaceFormat.format, + vk::ImageLayout::eGeneral, + vk::ImageLayout::ePresentSrcKHR ); + } + + // frame end + 
commandBuffer.end(); + const vk::PipelineStageFlags waitDestinationStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput; + vk::SubmitInfo submitInfo( **perFrameData[frameIndex].presentCompleteSemaphore, + waitDestinationStageMask, + *commandBuffer, + **perFrameData[frameIndex].renderCompleteSemaphore ); + graphicsQueue->submit( submitInfo, **perFrameData[frameIndex].fence ); + vk::PresentInfoKHR presentInfoKHR( + **perFrameData[frameIndex].renderCompleteSemaphore, **swapChainData.swapChain, backBufferIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + frameIndex = ( frameIndex + 1 ) % IMGUI_VK_QUEUED_FRAMES; + + double endTime = glfwGetTime(); + accumulatedTime += endTime - startTime; + ++frameCount; + if ( 1.0 < accumulatedTime ) + { + assert( 0 < frameCount ); + + std::ostringstream oss; + oss << AppName << ": " << vertices.size() << " Vertices " + << ( appInfo.useRasterRender ? "Rastering" : "RayTracing" ) << " ( " << frameCount / accumulatedTime + << " fps)"; + glfwSetWindowTitle( window, oss.str().c_str() ); + + accumulatedTime = 0.0; + frameCount = 0; + } + } + + // Cleanup + device->waitIdle(); + + glfwDestroyWindow( window ); + glfwTerminate(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/SecondaryCommandBuffer/CMakeLists.txt b/RAII_Samples/SecondaryCommandBuffer/CMakeLists.txt new file mode 100644 index 0000000..9e38430 --- /dev/null +++ b/RAII_Samples/SecondaryCommandBuffer/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_SecondaryCommandBuffer) + +set(HEADERS +) + +set(SOURCES + SecondaryCommandBuffer.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_SecondaryCommandBuffer + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_SecondaryCommandBuffer PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_SecondaryCommandBuffer PRIVATE utils) diff --git a/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp b/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp new file mode 100644 index 0000000..6c0bcd4 --- /dev/null +++ b/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp @@ -0,0 +1,282 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : SecondaryCommandBuffer +// Draw several cubes using primary and secondary command buffers + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +// no need to ignore any warnings with GCC +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "SecondaryCommandBuffer"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, 
graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, + colorFormat, + depthBufferData.format, + vk::AttachmentLoadOp::eClear, + vk::ImageLayout::eColorAttachmentOptimal ); + + 
glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + + vk::raii::su::TextureData greenTextureData( *physicalDevice, *device ); + greenTextureData.setImage( *commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); + + vk::raii::su::TextureData checkeredTextureData( *physicalDevice, *device ); + checkeredTextureData.setImage( *commandBuffer, vk::su::CheckerboardImageGenerator() ); + + // create two identical descriptor sets, each with a different texture but identical UBOs + std::unique_ptr descriptorPool = vk::raii::su::makeUniqueDescriptorPool( + *device, { { vk::DescriptorType::eUniformBuffer, 2 }, { vk::DescriptorType::eCombinedImageSampler, 2 } } ); + + std::array layouts = { 
**descriptorSetLayout, **descriptorSetLayout }; + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, layouts ); + vk::raii::DescriptorSets descriptorSets( *device, descriptorSetAllocateInfo ); + assert( descriptorSets.size() == 2 ); + + vk::raii::su::updateDescriptorSets( *device, + descriptorSets[0], + { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, {} } }, + greenTextureData ); + vk::raii::su::updateDescriptorSets( *device, + descriptorSets[1], + { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, {} } }, + checkeredTextureData ); + + /* VULKAN_KEY_START */ + + // create four secondary command buffers, for each quadrant of the screen + vk::CommandBufferAllocateInfo commandBufferAllocateInfo( **commandPool, vk::CommandBufferLevel::eSecondary, 4 ); + vk::raii::CommandBuffers secondaryCommandBuffers( *device, commandBufferAllocateInfo ); + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + vk::raii::su::setImageLayout( *commandBuffer, + static_cast( swapChainData.images[imageIndex] ), + swapChainData.colorFormat, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eColorAttachmentOptimal ); + + const vk::DeviceSize offset = 0; + vk::Viewport viewport( 0.0f, 0.0f, 200.0f, 200.0f, 0.0f, 1.0f ); + vk::Rect2D scissor( vk::Offset2D( 0, 0 ), vk::Extent2D( surfaceData.extent ) ); + + // now we record four separate command buffers, one for each quadrant of the screen + vk::CommandBufferInheritanceInfo commandBufferInheritanceInfo( **renderPass, 0, **framebuffers[imageIndex] ); + vk::CommandBufferBeginInfo secondaryBeginInfo( 
vk::CommandBufferUsageFlagBits::eOneTimeSubmit | + vk::CommandBufferUsageFlagBits::eRenderPassContinue, + &commandBufferInheritanceInfo ); + + std::array executeCommandBuffers; + for ( int i = 0; i < 4; i++ ) + { + viewport.x = 25.0f + 250.0f * ( i % 2 ); + viewport.y = 25.0f + 250.0f * ( i / 2 ); + + secondaryCommandBuffers[i].begin( secondaryBeginInfo ); + secondaryCommandBuffers[i].bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + secondaryCommandBuffers[i].bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { *descriptorSets[i == 0 || i == 3] }, nullptr ); + secondaryCommandBuffers[i].bindVertexBuffers( 0, { **vertexBufferData.buffer }, offset ); + secondaryCommandBuffers[i].setViewport( 0, viewport ); + secondaryCommandBuffers[i].setScissor( 0, scissor ); + secondaryCommandBuffers[i].draw( 12 * 3, 1, 0, 0 ); + secondaryCommandBuffers[i].end(); + executeCommandBuffers[i] = *secondaryCommandBuffers[i]; + } + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + + // specifying VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS means this render pass may ONLY call + // vkCmdExecuteCommands + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eSecondaryCommandBuffers ); + commandBuffer->executeCommands( executeCommandBuffers ); + commandBuffer->endRenderPass(); + + vk::ImageSubresourceRange imageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); + vk::ImageMemoryBarrier prePresentBarrier( vk::AccessFlagBits::eColorAttachmentWrite, + vk::AccessFlagBits::eMemoryRead, + vk::ImageLayout::eColorAttachmentOptimal, + vk::ImageLayout::ePresentSrcKHR, + VK_QUEUE_FAMILY_IGNORED, + 
VK_QUEUE_FAMILY_IGNORED, + static_cast( swapChainData.images[imageIndex] ), + imageSubresourceRange ); + commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eColorAttachmentOutput, + vk::PipelineStageFlagBits::eBottomOfPipe, + vk::DependencyFlags(), + nullptr, + nullptr, + prePresentBarrier ); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + result = presentQueue->presentKHR( vk::PresentInfoKHR( {}, **swapChainData.swapChain, imageIndex, {} ) ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/SeparateImageSampler/CMakeLists.txt b/RAII_Samples/SeparateImageSampler/CMakeLists.txt new file mode 100644 index 0000000..c00f669 --- /dev/null +++ b/RAII_Samples/SeparateImageSampler/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_SeparateImageSampler) + +set(HEADERS +) + +set(SOURCES + SeparateImageSampler.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_SeparateImageSampler + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_SeparateImageSampler PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_SeparateImageSampler PRIVATE utils) diff --git a/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp b/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp new file mode 100644 index 0000000..7ce5444 --- /dev/null +++ b/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp @@ -0,0 +1,302 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// VulkanHpp Samples : SeparateImageSampler +// Use separate image and sampler in descriptor set and shader to draw a textured cube. + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +#else +// unknow compiler... just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "SeparateImageSampler"; +static char const * EngineName = "Vulkan.hpp"; + +const std::string fragmentShaderTextTS_T_C = R"( +#version 400 + +#extension GL_ARB_separate_shader_objects : enable +#extension GL_ARB_shading_language_420pack : enable + +layout (set = 0, binding = 1) uniform texture2D tex; +layout (set = 0, binding = 2) uniform sampler samp; + +layout (location = 0) in vec2 inTexCoords; + +layout (location = 0) out vec4 outColor; + +void main() +{ + // Combine the selected texture with sampler as a parameter + vec4 resColor = texture(sampler2D(tex, samp), inTexCoords); + + // Create a border to see the cube more easily + if ((inTexCoords.x < 0.01f) || (0.99f < inTexCoords.x) + || (inTexCoords.y < 0.01f) || (0.99f < inTexCoords.y)) + { + resColor *= vec4(0.1f, 0.1f, 0.1f, 1.0f); + } + + outColor = resColor; +} +)"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + 
vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, 
vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderTextTS_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + *vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + /* VULKAN_KEY_START */ + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + + // Create the separate image + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + textureData.setImage( *commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); + + // Create the separate sampler + vk::SamplerCreateInfo samplerCreateInfo( {}, + vk::Filter::eNearest, + vk::Filter::eNearest, + vk::SamplerMipmapMode::eNearest, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + 0.0f, + false, + 1.0f, + false, + vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueWhite ); + std::unique_ptr sampler = vk::raii::su::make_unique( *device, samplerCreateInfo ); + + // Create binding and layout for the following, matching contents of shader + // binding 0 = uniform buffer (MVP) + // binding 1 = texture2D + // binding 2 = sampler + std::array resourceBindings = { + { vk::DescriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ), + vk::DescriptorSetLayoutBinding( 1, vk::DescriptorType::eSampledImage, 1, vk::ShaderStageFlagBits::eFragment ), + vk::DescriptorSetLayoutBinding( 2, vk::DescriptorType::eSampler, 1, vk::ShaderStageFlagBits::eFragment ) } + }; + 
vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( {}, resourceBindings ); + std::unique_ptr descriptorSetLayout = + vk::raii::su::make_unique( *device, descriptorSetLayoutCreateInfo ); + + // Create pipeline layout + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, **descriptorSetLayout ); + std::unique_ptr pipelineLayout = + vk::raii::su::make_unique( *device, pipelineLayoutCreateInfo ); + + // Create a single pool to contain data for the descriptor set + std::array poolSizes = { + { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), + vk::DescriptorPoolSize( vk::DescriptorType::eSampledImage, 1 ), + vk::DescriptorPoolSize( vk::DescriptorType::eSampler, 1 ) } + }; + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSizes ); + std::unique_ptr descriptorPool = + vk::raii::su::make_unique( *device, descriptorPoolCreateInfo ); + + // Populate descriptor sets + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( **descriptorPool, **descriptorSetLayout ); + std::unique_ptr descriptorSet = vk::raii::su::make_unique( + std::move( vk::raii::DescriptorSets( *device, descriptorSetAllocateInfo ).front() ) ); + + vk::DescriptorBufferInfo bufferInfo( **uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( + **textureData.sampler, **textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorImageInfo samplerInfo( **sampler, {}, {} ); + std::array descriptorWrites = { + { vk::WriteDescriptorSet( **descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( **descriptorSet, 1, 0, vk::DescriptorType::eSampledImage, imageInfo ), + vk::WriteDescriptorSet( **descriptorSet, 2, 0, vk::DescriptorType::eSampler, samplerInfo ) } + }; + device->updateDescriptorSets( descriptorWrites, nullptr ); + + /* VULKAN_KEY_END */ + + std::unique_ptr pipelineCache = + 
vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr 
drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/SurfaceCapabilities/CMakeLists.txt b/RAII_Samples/SurfaceCapabilities/CMakeLists.txt new file mode 100644 index 0000000..33b9769 --- /dev/null +++ b/RAII_Samples/SurfaceCapabilities/CMakeLists.txt @@ -0,0 +1,39 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_SurfaceCapabilities) + +set(HEADERS +) + +set(SOURCES + SurfaceCapabilities.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +# Win32 exclusive vk::SurfaceCapabilitiesFullScreenExclusiveEXT is used +if(WIN32) + + add_executable(RAII_SurfaceCapabilities + ${HEADERS} + ${SOURCES} + ) + set_target_properties(RAII_SurfaceCapabilities PROPERTIES FOLDER "RAII_Samples") + target_link_libraries(RAII_SurfaceCapabilities PRIVATE utils) + +endif() diff --git a/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp b/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp new file mode 100644 index 0000000..e0812c8 --- /dev/null +++ b/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp @@ -0,0 +1,181 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : SurfaceCapabilities +// Get surface capabilities. 
+ +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include +#include +#include + +static char const * AppName = "SurfaceCapabilities"; +static char const * EngineName = "Vulkan.hpp"; + +void cout( vk::SurfaceCapabilitiesKHR const & surfaceCapabilities ) +{ + std::cout << "\tCapabilities:\n"; + std::cout << "\t\t" + << "currentExtent = " << surfaceCapabilities.currentExtent.width << " x " + << surfaceCapabilities.currentExtent.height << "\n"; + std::cout << "\t\t" + << "currentTransform = " << vk::to_string( surfaceCapabilities.currentTransform ) << "\n"; + std::cout << "\t\t" + << "maxImageArrayLayers = " << surfaceCapabilities.maxImageArrayLayers << "\n"; + std::cout << "\t\t" + << "maxImageCount = " << surfaceCapabilities.maxImageCount << "\n"; + std::cout << "\t\t" + << "maxImageExtent = " << surfaceCapabilities.maxImageExtent.width << " x " + << surfaceCapabilities.maxImageExtent.height << "\n"; + std::cout << "\t\t" + << "minImageCount = " << surfaceCapabilities.minImageCount << "\n"; + std::cout << "\t\t" + << "minImageExtent = " << surfaceCapabilities.minImageExtent.width << " x " + << surfaceCapabilities.minImageExtent.height << "\n"; + std::cout << "\t\t" + << "supportedCompositeAlpha = " << vk::to_string( surfaceCapabilities.supportedCompositeAlpha ) << "\n"; + std::cout << "\t\t" + << "supportedTransforms = " << vk::to_string( surfaceCapabilities.supportedTransforms ) << "\n"; + std::cout << "\t\t" + << "supportedUsageFlags = " << vk::to_string( surfaceCapabilities.supportedUsageFlags ) << "\n"; + std::cout << "\n"; +} + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + + std::vector instanceExtensionProperties = context->enumerateInstanceExtensionProperties(); + bool supportsGetSurfaceCapabilities2 = + ( std::find_if( instanceExtensionProperties.begin(), + instanceExtensionProperties.end(), + []( vk::ExtensionProperties const & ep ) { + return strcmp( ep.extensionName, 
VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME ) == 0; + } ) != instanceExtensionProperties.end() ); + + std::vector extensions = vk::su::getInstanceExtensions(); + if ( supportsGetSurfaceCapabilities2 ) + { + extensions.push_back( VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME ); + } + + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, extensions ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + /* VULKAN_KEY_START */ + + std::cout << std::boolalpha; + for ( size_t i = 0; i < physicalDevices.size(); i++ ) + { + // some properties are only valid, if a corresponding extension is available! + std::vector extensionProperties = + physicalDevices[i].enumerateDeviceExtensionProperties(); + + std::cout << "PhysicalDevice " << i << "\n"; + if ( supportsGetSurfaceCapabilities2 ) + { + auto surfaceCapabilities2 = + physicalDevices[i] + .getSurfaceCapabilities2KHR( { **surfaceData.surface } ); + + vk::SurfaceCapabilitiesKHR const & surfaceCapabilities = + surfaceCapabilities2.get().surfaceCapabilities; + cout( surfaceCapabilities ); + + if ( vk::su::contains( extensionProperties, "VK_AMD_display_native_hdr" ) ) + { + vk::DisplayNativeHdrSurfaceCapabilitiesAMD displayNativeHdrSurfaceCapabilities = + surfaceCapabilities2.get(); + std::cout << "\tDisplayNativeHdrSurfaceCapabilitiesAMD:\n"; + std::cout << "\t\t" + << "localDimmingSupport = " << !!displayNativeHdrSurfaceCapabilities.localDimmingSupport << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_shared_presentable_image" ) ) + { + vk::SharedPresentSurfaceCapabilitiesKHR sharedPresentSurfaceCapabilities = + surfaceCapabilities2.get(); + std::cout << 
"\tSharedPresentSurfaceCapabilitiesKHR:\n"; + std::cout << "\t\t" + << "sharedPresentSupportedUsageFlags = " + << vk::to_string( sharedPresentSurfaceCapabilities.sharedPresentSupportedUsageFlags ) << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_EXT_full_screen_exclusive" ) ) + { + vk::SurfaceCapabilitiesFullScreenExclusiveEXT surfaceCapabilitiesFullScreenExclusive = + surfaceCapabilities2.get(); + std::cout << "\tSurfaceCapabilitiesFullScreenExclusiveEXT:\n"; + std::cout << "\t\t" + << "fullScreenExclusiveSupported = " + << !!surfaceCapabilitiesFullScreenExclusive.fullScreenExclusiveSupported << "\n"; + std::cout << "\n"; + } + + if ( vk::su::contains( extensionProperties, "VK_KHR_surface_protected_capabilities" ) ) + { + vk::SurfaceProtectedCapabilitiesKHR surfaceProtectedCapabilities = + surfaceCapabilities2.get(); + std::cout << "\tSurfaceProtectedCapabilitiesKHR:\n"; + std::cout << "\t\t" + << "supportsProtected = " << !!surfaceProtectedCapabilities.supportsProtected << "\n"; + std::cout << "\n"; + } + } + else + { + vk::SurfaceCapabilitiesKHR surfaceCapabilities = + physicalDevices[i].getSurfaceCapabilitiesKHR( **surfaceData.surface ); + cout( surfaceCapabilities ); + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/SurfaceFormats/CMakeLists.txt b/RAII_Samples/SurfaceFormats/CMakeLists.txt new file mode 100644 index 0000000..52ef9e6 --- /dev/null +++ b/RAII_Samples/SurfaceFormats/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_SurfaceFormats) + +set(HEADERS +) + +set(SOURCES + SurfaceFormats.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_SurfaceFormats + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_SurfaceFormats PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_SurfaceFormats PRIVATE utils) diff --git a/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp b/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp new file mode 100644 index 0000000..7ad27fe --- /dev/null +++ b/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp @@ -0,0 +1,82 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : SurfaceFormats +// Get surface formats. 
+ +#include "../utils/utils.hpp" +#include "vulkan/vulkan.hpp" + +#include +#include +#include + +static char const * AppName = "SurfaceFormats"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + + // enumerate the physicalDevices + vk::raii::PhysicalDevices physicalDevices( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + /* VULKAN_KEY_START */ + + std::cout << std::boolalpha; + for ( size_t i = 0; i < physicalDevices.size(); i++ ) + { + std::cout << "PhysicalDevice " << i << "\n"; + std::vector surfaceFormats = + physicalDevices[i].getSurfaceFormatsKHR( **surfaceData.surface ); + for ( size_t j = 0; j < surfaceFormats.size(); j++ ) + { + std::cout << "\tFormat " << j << "\n"; + std::cout << "\t\t" + << "colorSpace = " << vk::to_string( surfaceFormats[j].colorSpace ) << "\n"; + std::cout << "\t\t" + << "format = " << vk::to_string( surfaceFormats[j].format ) << "\n"; + std::cout << "\n"; + } + } + + /* VULKAN_KEY_END */ + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/Template/CMakeLists.txt b/RAII_Samples/Template/CMakeLists.txt new file mode 100644 index 0000000..a0f983c --- /dev/null +++ b/RAII_Samples/Template/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_Template) + +set(HEADERS +) + +set(SOURCES + Template.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_Template + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_Template PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_Template PRIVATE utils) diff --git a/RAII_Samples/Template/Template.cpp b/RAII_Samples/Template/Template.cpp new file mode 100644 index 0000000..3b3c7b6 --- /dev/null +++ b/RAII_Samples/Template/Template.cpp @@ -0,0 +1,211 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : Template +// Template sample to start from. Draw textured cube with mostly helpers. 
+ +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "Template"; +static char const * EngineName = "Vulkan.hpp"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + 
vk::raii::su::DepthBufferData depthBufferData( + *physicalDevice, *device, vk::Format::eD16Unorm, surfaceData.extent ); + + vk::raii::su::TextureData textureData( *physicalDevice, *device ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + textureData.setImage( *commandBuffer, vk::su::CheckerboardImageGenerator() ); + + vk::raii::su::BufferData uniformBufferData( + *physicalDevice, *device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::raii::su::copyToDevice( *uniformBufferData.deviceMemory, mvpcMatrix ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, + { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, + { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, depthBufferData.format ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); + + vk::raii::su::BufferData vertexBufferData( + *physicalDevice, *device, sizeof( texturedCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ); + vk::raii::su::copyToDevice( + 
*vertexBufferData.deviceMemory, texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); + + std::unique_ptr descriptorPool = vk::raii::su::makeUniqueDescriptorPool( + *device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( *device, + *descriptorSet, + { { vk::DescriptorType::eUniformBuffer, *uniformBufferData.buffer, nullptr } }, + { textureData } ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = vk::raii::su::makeUniqueGraphicsPipeline( + *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + sizeof( texturedCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + std::array clearValues; + clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( 
vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->bindVertexBuffers( 0, { **vertexBufferData.buffer }, { 0 } ); + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... 
) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/TexelBuffer/CMakeLists.txt b/RAII_Samples/TexelBuffer/CMakeLists.txt new file mode 100644 index 0000000..35a421f --- /dev/null +++ b/RAII_Samples/TexelBuffer/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.2) + +project(RAII_TexelBuffer) + +set(HEADERS +) + +set(SOURCES + TexelBuffer.cpp +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_executable(RAII_TexelBuffer + ${HEADERS} + ${SOURCES} +) + +set_target_properties(RAII_TexelBuffer PROPERTIES FOLDER "RAII_Samples") +target_link_libraries(RAII_TexelBuffer PRIVATE utils) diff --git a/RAII_Samples/TexelBuffer/TexelBuffer.cpp b/RAII_Samples/TexelBuffer/TexelBuffer.cpp new file mode 100644 index 0000000..77c8a0a --- /dev/null +++ b/RAII_Samples/TexelBuffer/TexelBuffer.cpp @@ -0,0 +1,246 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// VulkanHpp Samples : TexelBuffer +// Use a texel buffer to draw a green triangle. + +#include "../../samples/utils/geometries.hpp" +#include "../../samples/utils/math.hpp" +#include "../utils/shaders.hpp" +#include "../utils/utils.hpp" +#include "SPIRV/GlslangToSpv.h" +#include "vulkan/vulkan.hpp" + +#include +#include + +static char const * AppName = "TexelBuffer"; +static char const * EngineName = "Vulkan.hpp"; + +static const std::string vertexShaderText = R"( +#version 400 + +#extension GL_ARB_separate_shader_objects : enable +#extension GL_ARB_shading_language_420pack : enable + +layout (binding = 0) uniform samplerBuffer texels; +layout (location = 0) out vec4 outColor; + +vec2 vertices[3]; + +void main() +{ + float r = texelFetch(texels, 0).r; + float g = texelFetch(texels, 1).r; + float b = texelFetch(texels, 2).r; + outColor = vec4(r, g, b, 1.0); + + vertices[0] = vec2(-1.0, -1.0); + vertices[1] = vec2( 1.0, -1.0); + vertices[2] = vec2( 0.0, 1.0); + gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0); +} +)"; + +int main( int /*argc*/, char ** /*argv*/ ) +{ + const float texels[] = { 118.0f / 255.0f, 185.0f / 255.0f, 0.0f }; + + try + { + std::unique_ptr context = vk::raii::su::make_unique(); + std::unique_ptr instance = + vk::raii::su::makeUniqueInstance( *context, AppName, EngineName, {}, vk::su::getInstanceExtensions() ); +#if !defined( NDEBUG ) + std::unique_ptr debugUtilsMessenger = + vk::raii::su::makeUniqueDebugUtilsMessengerEXT( *instance ); +#endif + std::unique_ptr physicalDevice = 
vk::raii::su::makeUniquePhysicalDevice( *instance ); + + vk::PhysicalDeviceProperties physicalDeviceProperties = physicalDevice->getProperties(); + if ( physicalDeviceProperties.limits.maxTexelBufferElements < 4 ) + { + std::cout << "maxTexelBufferElements too small\n"; + exit( -1 ); + } + + vk::Format texelFormat = vk::Format::eR32Sfloat; + vk::FormatProperties formatProperties = physicalDevice->getFormatProperties( texelFormat ); + if ( !( formatProperties.bufferFeatures & vk::FormatFeatureFlagBits::eUniformTexelBuffer ) ) + { + std::cout << "R32_SFLOAT format unsupported for texel buffer\n"; + exit( -1 ); + } + + vk::raii::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); + + std::pair graphicsAndPresentQueueFamilyIndex = + vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( *physicalDevice, *surfaceData.surface ); + std::unique_ptr device = vk::raii::su::makeUniqueDevice( + *physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); + + std::unique_ptr commandPool = + vk::raii::su::makeUniqueCommandPool( *device, graphicsAndPresentQueueFamilyIndex.first ); + std::unique_ptr commandBuffer = + vk::raii::su::makeUniqueCommandBuffer( *device, *commandPool ); + + std::unique_ptr graphicsQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.first, 0 ); + std::unique_ptr presentQueue = + vk::raii::su::make_unique( *device, graphicsAndPresentQueueFamilyIndex.second, 0 ); + + vk::raii::su::SwapChainData swapChainData( *physicalDevice, + *device, + *surfaceData.surface, + surfaceData.extent, + vk::ImageUsageFlagBits::eColorAttachment | + vk::ImageUsageFlagBits::eTransferSrc, + {}, + graphicsAndPresentQueueFamilyIndex.first, + graphicsAndPresentQueueFamilyIndex.second ); + + vk::raii::su::BufferData texelBufferData( + *physicalDevice, *device, sizeof( texels ), vk::BufferUsageFlagBits::eUniformTexelBuffer ); + texelBufferData.upload( texels ); + + vk::BufferViewCreateInfo 
bufferViewCreateInfo( {}, **texelBufferData.buffer, texelFormat, 0, sizeof( texels ) ); + std::unique_ptr texelBufferView = + vk::raii::su::make_unique( *device, bufferViewCreateInfo ); + + std::unique_ptr descriptorSetLayout = vk::raii::su::makeUniqueDescriptorSetLayout( + *device, { { vk::DescriptorType::eUniformTexelBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + std::unique_ptr pipelineLayout = + vk::raii::su::makeUniquePipelineLayout( *device, *descriptorSetLayout ); + + vk::Format colorFormat = + vk::su::pickSurfaceFormat( physicalDevice->getSurfaceFormatsKHR( **surfaceData.surface ) ).format; + std::unique_ptr renderPass = + vk::raii::su::makeUniqueRenderPass( *device, colorFormat, vk::Format::eUndefined ); + + glslang::InitializeProcess(); + std::unique_ptr vertexShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); + std::unique_ptr fragmentShaderModule = + vk::raii::su::makeUniqueShaderModule( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); + glslang::FinalizeProcess(); + + std::vector> framebuffers = vk::raii::su::makeUniqueFramebuffers( + *device, *renderPass, swapChainData.imageViews, std::unique_ptr(), surfaceData.extent ); + + std::unique_ptr descriptorPool = + vk::raii::su::makeUniqueDescriptorPool( *device, { { vk::DescriptorType::eUniformTexelBuffer, 1 } } ); + std::unique_ptr descriptorSet = + vk::raii::su::makeUniqueDescriptorSet( *device, *descriptorPool, *descriptorSetLayout ); + + vk::raii::su::updateDescriptorSets( + *device, + *descriptorSet, + { { vk::DescriptorType::eUniformTexelBuffer, *texelBufferData.buffer, &*texelBufferView } }, + {} ); + + std::unique_ptr pipelineCache = + vk::raii::su::make_unique( *device, vk::PipelineCacheCreateInfo() ); + std::unique_ptr graphicsPipeline = + vk::raii::su::makeUniqueGraphicsPipeline( *device, + *pipelineCache, + *vertexShaderModule, + nullptr, + *fragmentShaderModule, + nullptr, + 0, + {}, + 
vk::FrontFace::eClockwise, + true, + *pipelineLayout, + *renderPass ); + + /* VULKAN_KEY_START */ + + // Get the index of the next available swapchain image: + std::unique_ptr imageAcquiredSemaphore = + vk::raii::su::make_unique( *device, vk::SemaphoreCreateInfo() ); + vk::Result result; + uint32_t imageIndex; + std::tie( result, imageIndex ) = + swapChainData.swapChain->acquireNextImage( vk::su::FenceTimeout, **imageAcquiredSemaphore ); + assert( result == vk::Result::eSuccess ); + assert( imageIndex < swapChainData.images.size() ); + + commandBuffer->begin( vk::CommandBufferBeginInfo() ); + + vk::ClearValue clearValue; + clearValue.color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); + vk::RenderPassBeginInfo renderPassBeginInfo( + **renderPass, **framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); + + commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, **graphicsPipeline ); + commandBuffer->bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, **pipelineLayout, 0, { **descriptorSet }, nullptr ); + + commandBuffer->setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + + commandBuffer->draw( 3, 1, 0, 0 ); + commandBuffer->endRenderPass(); + commandBuffer->end(); + + std::unique_ptr drawFence = vk::raii::su::make_unique( *device, vk::FenceCreateInfo() ); + + vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + vk::SubmitInfo submitInfo( **imageAcquiredSemaphore, waitDestinationStageMask, **commandBuffer ); + graphicsQueue->submit( submitInfo, **drawFence ); + + while ( vk::Result::eTimeout == device->waitForFences( { **drawFence }, VK_TRUE, vk::su::FenceTimeout ) 
) + ; + + vk::PresentInfoKHR presentInfoKHR( nullptr, **swapChainData.swapChain, imageIndex ); + result = presentQueue->presentKHR( presentInfoKHR ); + switch ( result ) + { + case vk::Result::eSuccess: break; + case vk::Result::eSuboptimalKHR: + std::cout << "vk::Queue::presentKHR returned vk::Result::eSuboptimalKHR !\n"; + break; + default: assert( false ); // an unexpected result is returned ! + } + std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + /* VULKAN_KEY_END */ + + device->waitIdle(); + } + catch ( vk::SystemError & err ) + { + std::cout << "vk::SystemError: " << err.what() << std::endl; + exit( -1 ); + } + catch ( std::exception & err ) + { + std::cout << "std::exception: " << err.what() << std::endl; + exit( -1 ); + } + catch ( ... ) + { + std::cout << "unknown error\n"; + exit( -1 ); + } + return 0; +} diff --git a/RAII_Samples/utils/CMakeLists.txt b/RAII_Samples/utils/CMakeLists.txt new file mode 100644 index 0000000..272b3de --- /dev/null +++ b/RAII_Samples/utils/CMakeLists.txt @@ -0,0 +1,37 @@ +# Copyright(c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.2) + +project(RAII_utils) + +set(HEADERS + shaders.hpp + utils.hpp +) + +set(SOURCES +) + +source_group(headers FILES ${HEADERS}) +source_group(sources FILES ${SOURCES}) + +add_library(RAII_utils + ${SOURCES} + ${HEADERS} +) + +target_link_libraries(RAII_utils PRIVATE utils) +target_compile_definitions(RAII_utils PUBLIC VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) + diff --git a/RAII_Samples/utils/shaders.hpp b/RAII_Samples/utils/shaders.hpp new file mode 100644 index 0000000..4e2b214 --- /dev/null +++ b/RAII_Samples/utils/shaders.hpp @@ -0,0 +1,41 @@ +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "utils.hpp" +#include "../../samples/utils/shaders.hpp" + +namespace vk +{ + namespace raii + { + namespace su + { + template + std::unique_ptr makeUniqueShaderModule( vk::raii::Device const & device, + vk::ShaderStageFlagBits shaderStage, + std::string const & shaderText ) + { + std::vector shaderSPV; + if ( !vk::su::GLSLtoSPV( shaderStage, shaderText, shaderSPV ) ) + { + throw std::runtime_error( "Could not convert glsl shader to spir-v -> terminating" ); + } + + return vk::raii::su::make_unique( + device, vk::ShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), shaderSPV ) ); + } + } // namespace su + } // namespace raii +} // namespace vk diff --git a/RAII_Samples/utils/utils.hpp b/RAII_Samples/utils/utils.hpp new file mode 100644 index 0000000..da30920 --- /dev/null +++ b/RAII_Samples/utils/utils.hpp @@ -0,0 +1,1053 @@ +#pragma once + +// Copyright(c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#if defined( _MSC_VER ) +// no need to ignore any warnings with MSVC +#elif defined( __clang__ ) +# pragma clang diagnostic ignored "-Wmissing-braces" +#elif defined( __GNUC__ ) +// no need to ignore any warnings with GCC +#else +// unknown compiler... 
just ignore the warnings for yourselves ;) +#endif + +#include "../../samples/utils/utils.hpp" +#include "vulkan/vulkan_raii.hpp" + +#include + +namespace vk +{ + namespace raii + { + namespace su + { + vk::raii::DeviceMemory allocateDeviceMemory( vk::raii::Device const & device, + vk::PhysicalDeviceMemoryProperties const & memoryProperties, + vk::MemoryRequirements const & memoryRequirements, + vk::MemoryPropertyFlags memoryPropertyFlags ) + { + uint32_t memoryTypeIndex = + vk::su::findMemoryType( memoryProperties, memoryRequirements.memoryTypeBits, memoryPropertyFlags ); + vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); + return vk::raii::DeviceMemory( device, memoryAllocateInfo ); + } + + template + void copyToDevice( vk::raii::DeviceMemory const & deviceMemory, + T const * pData, + size_t count, + vk::DeviceSize stride = sizeof( T ) ) + { + assert( sizeof( T ) <= stride ); + uint8_t * deviceData = static_cast( deviceMemory.mapMemory( 0, count * stride ) ); + if ( stride == sizeof( T ) ) + { + memcpy( deviceData, pData, count * sizeof( T ) ); + } + else + { + for ( size_t i = 0; i < count; i++ ) + { + memcpy( deviceData, &pData[i], sizeof( T ) ); + deviceData += stride; + } + } + deviceMemory.unmapMemory(); + } + + template + void copyToDevice( vk::raii::DeviceMemory const & deviceMemory, T const & data ) + { + copyToDevice( deviceMemory, &data, 1 ); + } + + template + std::unique_ptr make_unique( Args &&... args ) + { +#if ( 14 <= VULKAN_HPP_CPP_VERSION ) + return std::make_unique( std::forward( args )... ); +#else + return std::unique_ptr( new T( std::forward( args )... 
) ); +#endif + } + + template + void + oneTimeSubmit( vk::raii::CommandBuffer const & commandBuffer, vk::raii::Queue const & queue, Func const & func ) + { + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); + func( commandBuffer ); + commandBuffer.end(); + vk::SubmitInfo submitInfo( nullptr, nullptr, *commandBuffer ); + queue.submit( submitInfo, nullptr ); + queue.waitIdle(); + } + + template + void oneTimeSubmit( vk::raii::Device const & device, + vk::raii::CommandPool const & commandPool, + vk::raii::Queue const & queue, + Func const & func ) + { + vk::raii::CommandBuffers commandBuffers( device, { *commandPool, vk::CommandBufferLevel::ePrimary, 1 } ); + oneTimeSubmit( commandBuffers.front(), queue, func ); + } + + void setImageLayout( vk::raii::CommandBuffer const & commandBuffer, + vk::Image image, + vk::Format format, + vk::ImageLayout oldImageLayout, + vk::ImageLayout newImageLayout ) + { + vk::AccessFlags sourceAccessMask; + switch ( oldImageLayout ) + { + case vk::ImageLayout::eTransferDstOptimal: sourceAccessMask = vk::AccessFlagBits::eTransferWrite; break; + case vk::ImageLayout::ePreinitialized: sourceAccessMask = vk::AccessFlagBits::eHostWrite; break; + case vk::ImageLayout::eGeneral: // sourceAccessMask is empty + case vk::ImageLayout::eUndefined: break; + default: assert( false ); break; + } + + vk::PipelineStageFlags sourceStage; + switch ( oldImageLayout ) + { + case vk::ImageLayout::eGeneral: + case vk::ImageLayout::ePreinitialized: sourceStage = vk::PipelineStageFlagBits::eHost; break; + case vk::ImageLayout::eTransferDstOptimal: sourceStage = vk::PipelineStageFlagBits::eTransfer; break; + case vk::ImageLayout::eUndefined: sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; break; + default: assert( false ); break; + } + + vk::AccessFlags destinationAccessMask; + switch ( newImageLayout ) + { + case vk::ImageLayout::eColorAttachmentOptimal: + destinationAccessMask = 
vk::AccessFlagBits::eColorAttachmentWrite; + break; + case vk::ImageLayout::eDepthStencilAttachmentOptimal: + destinationAccessMask = + vk::AccessFlagBits::eDepthStencilAttachmentRead | vk::AccessFlagBits::eDepthStencilAttachmentWrite; + break; + case vk::ImageLayout::eGeneral: // empty destinationAccessMask + case vk::ImageLayout::ePresentSrcKHR: break; + case vk::ImageLayout::eShaderReadOnlyOptimal: destinationAccessMask = vk::AccessFlagBits::eShaderRead; break; + case vk::ImageLayout::eTransferSrcOptimal: destinationAccessMask = vk::AccessFlagBits::eTransferRead; break; + case vk::ImageLayout::eTransferDstOptimal: destinationAccessMask = vk::AccessFlagBits::eTransferWrite; break; + default: assert( false ); break; + } + + vk::PipelineStageFlags destinationStage; + switch ( newImageLayout ) + { + case vk::ImageLayout::eColorAttachmentOptimal: + destinationStage = vk::PipelineStageFlagBits::eColorAttachmentOutput; + break; + case vk::ImageLayout::eDepthStencilAttachmentOptimal: + destinationStage = vk::PipelineStageFlagBits::eEarlyFragmentTests; + break; + case vk::ImageLayout::eGeneral: destinationStage = vk::PipelineStageFlagBits::eHost; break; + case vk::ImageLayout::ePresentSrcKHR: destinationStage = vk::PipelineStageFlagBits::eBottomOfPipe; break; + case vk::ImageLayout::eShaderReadOnlyOptimal: + destinationStage = vk::PipelineStageFlagBits::eFragmentShader; + break; + case vk::ImageLayout::eTransferDstOptimal: + case vk::ImageLayout::eTransferSrcOptimal: destinationStage = vk::PipelineStageFlagBits::eTransfer; break; + default: assert( false ); break; + } + + vk::ImageAspectFlags aspectMask; + if ( newImageLayout == vk::ImageLayout::eDepthStencilAttachmentOptimal ) + { + aspectMask = vk::ImageAspectFlagBits::eDepth; + if ( format == vk::Format::eD32SfloatS8Uint || format == vk::Format::eD24UnormS8Uint ) + { + aspectMask |= vk::ImageAspectFlagBits::eStencil; + } + } + else + { + aspectMask = vk::ImageAspectFlagBits::eColor; + } + + vk::ImageSubresourceRange 
imageSubresourceRange( aspectMask, 0, 1, 0, 1 ); + vk::ImageMemoryBarrier imageMemoryBarrier( sourceAccessMask, + destinationAccessMask, + oldImageLayout, + newImageLayout, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + image, + imageSubresourceRange ); + return commandBuffer.pipelineBarrier( sourceStage, destinationStage, {}, nullptr, nullptr, imageMemoryBarrier ); + } + + struct BufferData + { + BufferData( vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::DeviceSize size, + vk::BufferUsageFlags usage, + vk::MemoryPropertyFlags propertyFlags = vk::MemoryPropertyFlagBits::eHostVisible | + vk::MemoryPropertyFlagBits::eHostCoherent ) +#if !defined( NDEBUG ) + : m_size( size ), m_usage( usage ), m_propertyFlags( propertyFlags ) +#endif + { + buffer = vk::raii::su::make_unique( device, vk::BufferCreateInfo( {}, size, usage ) ); + deviceMemory = vk::raii::su::make_unique( vk::raii::su::allocateDeviceMemory( + device, physicalDevice.getMemoryProperties(), buffer->getMemoryRequirements(), propertyFlags ) ); + buffer->bindMemory( **deviceMemory, 0 ); + } + + template + void upload( DataType const & data ) const + { + assert( ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostCoherent ) && + ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ) ); + assert( sizeof( DataType ) <= m_size ); + + void * dataPtr = deviceMemory->mapMemory( 0, sizeof( DataType ) ); + memcpy( dataPtr, &data, sizeof( DataType ) ); + deviceMemory->unmapMemory(); + } + + template + void upload( std::vector const & data, size_t stride = 0 ) const + { + assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ); + + size_t elementSize = stride ? 
stride : sizeof( DataType ); + assert( sizeof( DataType ) <= elementSize ); + + copyToDevice( *deviceMemory, data.data(), data.size(), elementSize ); + } + + template + void upload( vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::raii::CommandPool const & commandPool, + vk::raii::Queue const & queue, + std::vector const & data, + size_t stride ) const + { + assert( m_usage & vk::BufferUsageFlagBits::eTransferDst ); + assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eDeviceLocal ); + + size_t elementSize = stride ? stride : sizeof( DataType ); + assert( sizeof( DataType ) <= elementSize ); + + size_t dataSize = data.size() * elementSize; + assert( dataSize <= m_size ); + + vk::raii::su::BufferData stagingBuffer( + physicalDevice, device, dataSize, vk::BufferUsageFlagBits::eTransferSrc ); + copyToDevice( *stagingBuffer.deviceMemory, data.data(), data.size(), elementSize ); + + vk::raii::su::oneTimeSubmit( + device, commandPool, queue, [&]( vk::raii::CommandBuffer const & commandBuffer ) { + commandBuffer.copyBuffer( **stagingBuffer.buffer, **this->buffer, vk::BufferCopy( 0, 0, dataSize ) ); + } ); + } + + std::unique_ptr buffer; + std::unique_ptr deviceMemory; +#if !defined( NDEBUG ) + private: + vk::DeviceSize m_size; + vk::BufferUsageFlags m_usage; + vk::MemoryPropertyFlags m_propertyFlags; +#endif + }; + + struct ImageData + { + ImageData( vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::Format format_, + vk::Extent2D const & extent, + vk::ImageTiling tiling, + vk::ImageUsageFlags usage, + vk::ImageLayout initialLayout, + vk::MemoryPropertyFlags memoryProperties, + vk::ImageAspectFlags aspectMask ) + : format( format_ ) + { + vk::ImageCreateInfo imageCreateInfo( vk::ImageCreateFlags(), + vk::ImageType::e2D, + format, + vk::Extent3D( extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + tiling, + usage | vk::ImageUsageFlagBits::eSampled, + vk::SharingMode::eExclusive, + {}, + 
initialLayout ); + image = vk::raii::su::make_unique( device, imageCreateInfo ); + + deviceMemory = vk::raii::su::make_unique( vk::raii::su::allocateDeviceMemory( + device, physicalDevice.getMemoryProperties(), image->getMemoryRequirements(), memoryProperties ) ); + + image->bindMemory( **deviceMemory, 0 ); + + vk::ComponentMapping componentMapping( + ComponentSwizzle::eR, ComponentSwizzle::eG, ComponentSwizzle::eB, ComponentSwizzle::eA ); + vk::ImageSubresourceRange imageSubresourceRange( aspectMask, 0, 1, 0, 1 ); + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, **image, vk::ImageViewType::e2D, format, componentMapping, imageSubresourceRange ); + imageView = vk::raii::su::make_unique( device, imageViewCreateInfo ); + } + + vk::Format format; + std::unique_ptr image; + std::unique_ptr deviceMemory; + std::unique_ptr imageView; + }; + + struct DepthBufferData : public ImageData + { + DepthBufferData( vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::Format format, + vk::Extent2D const & extent ) + : ImageData( physicalDevice, + device, + format, + extent, + vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eDepthStencilAttachment, + vk::ImageLayout::eUndefined, + vk::MemoryPropertyFlagBits::eDeviceLocal, + vk::ImageAspectFlagBits::eDepth ) + {} + }; + + struct SurfaceData + { + SurfaceData( vk::raii::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent_ ) + : extent( extent_ ), window( vk::su::createWindow( windowName, extent ) ) + { + VkSurfaceKHR _surface; + VkResult err = + glfwCreateWindowSurface( static_cast( *instance ), window.handle, nullptr, &_surface ); + if ( err != VK_SUCCESS ) + throw std::runtime_error( "Failed to create window!" 
); + surface = vk::raii::su::make_unique( instance, _surface ); + } + + vk::Extent2D extent; + vk::su::WindowData window; + std::unique_ptr surface; + }; + + struct SwapChainData + { + SwapChainData( vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::raii::SurfaceKHR const & surface, + vk::Extent2D const & extent, + vk::ImageUsageFlags usage, + std::unique_ptr const & oldSwapchain, + uint32_t graphicsQueueFamilyIndex, + uint32_t presentQueueFamilyIndex ) + { + vk::SurfaceFormatKHR surfaceFormat = + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surface ) ); + colorFormat = surfaceFormat.format; + + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( *surface ); + VkExtent2D swapchainExtent; + if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) + { + // If the surface size is undefined, the size is set to the size of the images requested. + swapchainExtent.width = vk::su::clamp( + extent.width, surfaceCapabilities.minImageExtent.width, surfaceCapabilities.maxImageExtent.width ); + swapchainExtent.height = vk::su::clamp( + extent.height, surfaceCapabilities.minImageExtent.height, surfaceCapabilities.maxImageExtent.height ); + } + else + { + // If the surface size is defined, the swap chain size must match + swapchainExtent = surfaceCapabilities.currentExtent; + } + vk::SurfaceTransformFlagBitsKHR preTransform = + ( surfaceCapabilities.supportedTransforms & vk::SurfaceTransformFlagBitsKHR::eIdentity ) + ? vk::SurfaceTransformFlagBitsKHR::eIdentity + : surfaceCapabilities.currentTransform; + vk::CompositeAlphaFlagBitsKHR compositeAlpha = + ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePreMultiplied ) + ? vk::CompositeAlphaFlagBitsKHR::ePreMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) + ? 
vk::CompositeAlphaFlagBitsKHR::ePostMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) + ? vk::CompositeAlphaFlagBitsKHR::eInherit + : vk::CompositeAlphaFlagBitsKHR::eOpaque; + vk::PresentModeKHR presentMode = + vk::su::pickPresentMode( physicalDevice.getSurfacePresentModesKHR( *surface ) ); + vk::SwapchainCreateInfoKHR swapChainCreateInfo( {}, + *surface, + surfaceCapabilities.minImageCount, + colorFormat, + surfaceFormat.colorSpace, + swapchainExtent, + 1, + usage, + vk::SharingMode::eExclusive, + {}, + preTransform, + compositeAlpha, + presentMode, + true, + oldSwapchain ? **oldSwapchain : nullptr ); + if ( graphicsQueueFamilyIndex != presentQueueFamilyIndex ) + { + uint32_t queueFamilyIndices[2] = { graphicsQueueFamilyIndex, presentQueueFamilyIndex }; + // If the graphics and present queues are from different queue families, we either have to explicitly + // transfer ownership of images between the queues, or we have to create the swapchain with imageSharingMode + // as vk::SharingMode::eConcurrent + swapChainCreateInfo.imageSharingMode = vk::SharingMode::eConcurrent; + swapChainCreateInfo.queueFamilyIndexCount = 2; + swapChainCreateInfo.pQueueFamilyIndices = queueFamilyIndices; + } + swapChain = vk::raii::su::make_unique( device, swapChainCreateInfo ); + + images = swapChain->getImages(); + + imageViews.reserve( images.size() ); + vk::ComponentMapping componentMapping( + vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); + vk::ImageSubresourceRange subResourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); + for ( auto image : images ) + { + vk::ImageViewCreateInfo imageViewCreateInfo( {}, + static_cast( image ), + vk::ImageViewType::e2D, + colorFormat, + componentMapping, + subResourceRange ); + imageViews.emplace_back( device, imageViewCreateInfo ); + } + } + + vk::Format colorFormat; + std::unique_ptr swapChain; + std::vector images; + 
std::vector imageViews; + }; + + struct TextureData + { + TextureData( vk::raii::PhysicalDevice const & physicalDevice, + vk::raii::Device const & device, + vk::Extent2D const & extent_ = { 256, 256 }, + vk::ImageUsageFlags usageFlags = {}, + vk::FormatFeatureFlags formatFeatureFlags = {}, + bool anisotropyEnable = false, + bool forceStaging = false ) + : format( vk::Format::eR8G8B8A8Unorm ), extent( extent_ ) + { + vk::FormatProperties formatProperties = physicalDevice.getFormatProperties( format ); + + formatFeatureFlags |= vk::FormatFeatureFlagBits::eSampledImage; + needsStaging = + forceStaging || ( ( formatProperties.linearTilingFeatures & formatFeatureFlags ) != formatFeatureFlags ); + vk::ImageTiling imageTiling; + vk::ImageLayout initialLayout; + vk::MemoryPropertyFlags requirements; + if ( needsStaging ) + { + assert( ( formatProperties.optimalTilingFeatures & formatFeatureFlags ) == formatFeatureFlags ); + stagingBufferData = vk::raii::su::make_unique( + physicalDevice, device, extent.width * extent.height * 4, vk::BufferUsageFlagBits::eTransferSrc ); + imageTiling = vk::ImageTiling::eOptimal; + usageFlags |= vk::ImageUsageFlagBits::eTransferDst; + initialLayout = vk::ImageLayout::eUndefined; + } + else + { + imageTiling = vk::ImageTiling::eLinear; + initialLayout = vk::ImageLayout::ePreinitialized; + requirements = vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible; + } + imageData = vk::raii::su::make_unique( physicalDevice, + device, + format, + extent, + imageTiling, + usageFlags | vk::ImageUsageFlagBits::eSampled, + initialLayout, + requirements, + vk::ImageAspectFlagBits::eColor ); + + sampler = + vk::raii::su::make_unique( device, + vk::SamplerCreateInfo( vk::SamplerCreateFlags(), + vk::Filter::eLinear, + vk::Filter::eLinear, + vk::SamplerMipmapMode::eLinear, + vk::SamplerAddressMode::eRepeat, + vk::SamplerAddressMode::eRepeat, + vk::SamplerAddressMode::eRepeat, + 0.0f, + anisotropyEnable, + 16.0f, + false, + 
vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueBlack ) ); + } + + template + void setImage( vk::raii::CommandBuffer const & commandBuffer, ImageGenerator const & imageGenerator ) + { + void * data = + needsStaging + ? stagingBufferData->deviceMemory->mapMemory( 0, stagingBufferData->buffer->getMemoryRequirements().size ) + : imageData->deviceMemory->mapMemory( 0, imageData->image->getMemoryRequirements().size ); + imageGenerator( data, extent ); + needsStaging ? stagingBufferData->deviceMemory->unmapMemory() : imageData->deviceMemory->unmapMemory(); + + if ( needsStaging ) + { + // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal + vk::raii::su::setImageLayout( commandBuffer, + **imageData->image, + imageData->format, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eTransferDstOptimal ); + vk::BufferImageCopy copyRegion( 0, + extent.width, + extent.height, + vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), + vk::Offset3D( 0, 0, 0 ), + vk::Extent3D( extent, 1 ) ); + commandBuffer.copyBufferToImage( + **stagingBufferData->buffer, **imageData->image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); + // Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY + vk::raii::su::setImageLayout( commandBuffer, + **imageData->image, + imageData->format, + vk::ImageLayout::eTransferDstOptimal, + vk::ImageLayout::eShaderReadOnlyOptimal ); + } + else + { + // If we can use the linear tiled image as a texture, just do it + vk::raii::su::setImageLayout( commandBuffer, + **imageData->image, + imageData->format, + vk::ImageLayout::ePreinitialized, + vk::ImageLayout::eShaderReadOnlyOptimal ); + } + } + + vk::Format format; + vk::Extent2D extent; + bool needsStaging; + std::unique_ptr stagingBufferData; + std::unique_ptr imageData; + std::unique_ptr sampler; + }; + + std::pair + findGraphicsAndPresentQueueFamilyIndex( vk::raii::PhysicalDevice const & physicalDevice, + 
vk::raii::SurfaceKHR const & surface ) + { + std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); + assert( queueFamilyProperties.size() < std::numeric_limits::max() ); + + uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( queueFamilyProperties ); + if ( physicalDevice.getSurfaceSupportKHR( graphicsQueueFamilyIndex, *surface ) ) + { + return std::make_pair( + graphicsQueueFamilyIndex, + graphicsQueueFamilyIndex ); // the first graphicsQueueFamilyIndex does also support presents + } + + // the graphicsQueueFamilyIndex doesn't support present -> look for an other family index that supports both + // graphics and present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && + physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + return std::make_pair( static_cast( i ), static_cast( i ) ); + } + } + + // there's nothing like a single family index that supports both graphics and present -> look for an other + // family index that supports present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + return std::make_pair( graphicsQueueFamilyIndex, static_cast( i ) ); + } + } + + throw std::runtime_error( "Could not find queues for both graphics or present -> terminating" ); + } + + std::unique_ptr makeUniqueCommandBuffer( vk::raii::Device const & device, + vk::raii::CommandPool const & commandPool ) + { + vk::CommandBufferAllocateInfo commandBufferAllocateInfo( *commandPool, vk::CommandBufferLevel::ePrimary, 1 ); + return vk::raii::su::make_unique( + std::move( vk::raii::CommandBuffers( device, commandBufferAllocateInfo ).front() ) ); + } + + std::unique_ptr makeUniqueCommandPool( vk::raii::Device const & device, + uint32_t queueFamilyIndex ) + { + vk::CommandPoolCreateInfo commandPoolCreateInfo( 
vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + queueFamilyIndex ); + return vk::raii::su::make_unique( device, commandPoolCreateInfo ); + } + + std::unique_ptr + makeUniqueDebugUtilsMessengerEXT( vk::raii::Instance const & instance ) + { + return vk::raii::su::make_unique( + instance, vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); + } + + std::unique_ptr + makeUniqueDescriptorPool( vk::raii::Device const & device, + std::vector const & poolSizes ) + { + assert( !poolSizes.empty() ); + uint32_t maxSets = std::accumulate( + poolSizes.begin(), poolSizes.end(), 0, []( uint32_t sum, vk::DescriptorPoolSize const & dps ) { + return sum + dps.descriptorCount; + } ); + assert( 0 < maxSets ); + + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( + vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, maxSets, poolSizes ); + return vk::raii::su::make_unique( device, descriptorPoolCreateInfo ); + } + + std::unique_ptr + makeUniqueDescriptorSet( vk::raii::Device const & device, + vk::raii::DescriptorPool const & descriptorPool, + vk::raii::DescriptorSetLayout const & descriptorSetLayout ) + { + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ); + return vk::raii::su::make_unique( + std::move( vk::raii::DescriptorSets( device, descriptorSetAllocateInfo ).front() ) ); + } + + std::unique_ptr makeUniqueDescriptorSetLayout( + vk::raii::Device const & device, + std::vector> const & bindingData, + vk::DescriptorSetLayoutCreateFlags flags = {} ) + { + std::vector bindings( bindingData.size() ); + for ( size_t i = 0; i < bindingData.size(); i++ ) + { + bindings[i] = vk::DescriptorSetLayoutBinding( vk::su::checked_cast( i ), + std::get<0>( bindingData[i] ), + std::get<1>( bindingData[i] ), + std::get<2>( bindingData[i] ) ); + } + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo( flags, bindings ); + return vk::raii::su::make_unique( device, descriptorSetLayoutCreateInfo ); + } + + std::unique_ptr + 
makeUniqueDevice( vk::raii::PhysicalDevice const & physicalDevice, + uint32_t queueFamilyIndex, + std::vector const & extensions = {}, + vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr, + void const * pNext = nullptr ) + { + std::vector enabledExtensions; + enabledExtensions.reserve( extensions.size() ); + for ( auto const & ext : extensions ) + { + enabledExtensions.push_back( ext.data() ); + } + + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( + vk::DeviceQueueCreateFlags(), queueFamilyIndex, 1, &queuePriority ); + vk::DeviceCreateInfo deviceCreateInfo( + vk::DeviceCreateFlags(), deviceQueueCreateInfo, {}, enabledExtensions, physicalDeviceFeatures ); + deviceCreateInfo.pNext = pNext; + return vk::raii::su::make_unique( physicalDevice, deviceCreateInfo ); + } + + std::vector> + makeUniqueFramebuffers( vk::raii::Device const & device, + vk::raii::RenderPass & renderPass, + std::vector const & imageViews, + std::unique_ptr const & depthImageView, + vk::Extent2D const & extent ) + { + vk::ImageView attachments[2]; + attachments[1] = depthImageView ? **depthImageView : vk::ImageView(); + + vk::FramebufferCreateInfo framebufferCreateInfo( vk::FramebufferCreateFlags(), + *renderPass, + depthImageView ? 
2 : 1, + attachments, + extent.width, + extent.height, + 1 ); + std::vector> framebuffers; + framebuffers.reserve( imageViews.size() ); + for ( auto const & imageView : imageViews ) + { + attachments[0] = *imageView; + framebuffers.push_back( vk::raii::su::make_unique( device, framebufferCreateInfo ) ); + } + + return framebuffers; + } + + std::unique_ptr makeUniqueGraphicsPipeline( + vk::raii::Device const & device, + vk::raii::PipelineCache const & pipelineCache, + vk::raii::ShaderModule const & vertexShaderModule, + vk::SpecializationInfo const * vertexShaderSpecializationInfo, + vk::raii::ShaderModule const & fragmentShaderModule, + vk::SpecializationInfo const * fragmentShaderSpecializationInfo, + uint32_t vertexStride, + std::vector> const & vertexInputAttributeFormatOffset, + vk::FrontFace frontFace, + bool depthBuffered, + vk::raii::PipelineLayout const & pipelineLayout, + vk::raii::RenderPass const & renderPass ) + { + std::array pipelineShaderStageCreateInfos = { + vk::PipelineShaderStageCreateInfo( + {}, vk::ShaderStageFlagBits::eVertex, *vertexShaderModule, "main", vertexShaderSpecializationInfo ), + vk::PipelineShaderStageCreateInfo( + {}, vk::ShaderStageFlagBits::eFragment, *fragmentShaderModule, "main", fragmentShaderSpecializationInfo ) + }; + + std::vector vertexInputAttributeDescriptions; + vk::PipelineVertexInputStateCreateInfo pipelineVertexInputStateCreateInfo; + vk::VertexInputBindingDescription vertexInputBindingDescription( 0, vertexStride ); + + if ( 0 < vertexStride ) + { + vertexInputAttributeDescriptions.reserve( vertexInputAttributeFormatOffset.size() ); + for ( uint32_t i = 0; i < vertexInputAttributeFormatOffset.size(); i++ ) + { + vertexInputAttributeDescriptions.emplace_back( + i, 0, vertexInputAttributeFormatOffset[i].first, vertexInputAttributeFormatOffset[i].second ); + } + pipelineVertexInputStateCreateInfo.setVertexBindingDescriptions( vertexInputBindingDescription ); + 
pipelineVertexInputStateCreateInfo.setVertexAttributeDescriptions( vertexInputAttributeDescriptions ); + } + + vk::PipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateCreateInfo( + vk::PipelineInputAssemblyStateCreateFlags(), vk::PrimitiveTopology::eTriangleList ); + + vk::PipelineViewportStateCreateInfo pipelineViewportStateCreateInfo( + vk::PipelineViewportStateCreateFlags(), 1, nullptr, 1, nullptr ); + + vk::PipelineRasterizationStateCreateInfo pipelineRasterizationStateCreateInfo( + vk::PipelineRasterizationStateCreateFlags(), + false, + false, + vk::PolygonMode::eFill, + vk::CullModeFlagBits::eBack, + frontFace, + false, + 0.0f, + 0.0f, + 0.0f, + 1.0f ); + + vk::PipelineMultisampleStateCreateInfo pipelineMultisampleStateCreateInfo( {}, vk::SampleCountFlagBits::e1 ); + + vk::StencilOpState stencilOpState( + vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::StencilOp::eKeep, vk::CompareOp::eAlways ); + vk::PipelineDepthStencilStateCreateInfo pipelineDepthStencilStateCreateInfo( + vk::PipelineDepthStencilStateCreateFlags(), + depthBuffered, + depthBuffered, + vk::CompareOp::eLessOrEqual, + false, + false, + stencilOpState, + stencilOpState ); + + vk::ColorComponentFlags colorComponentFlags( vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | + vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA ); + vk::PipelineColorBlendAttachmentState pipelineColorBlendAttachmentState( false, + vk::BlendFactor::eZero, + vk::BlendFactor::eZero, + vk::BlendOp::eAdd, + vk::BlendFactor::eZero, + vk::BlendFactor::eZero, + vk::BlendOp::eAdd, + colorComponentFlags ); + vk::PipelineColorBlendStateCreateInfo pipelineColorBlendStateCreateInfo( + vk::PipelineColorBlendStateCreateFlags(), + false, + vk::LogicOp::eNoOp, + pipelineColorBlendAttachmentState, + { { 1.0f, 1.0f, 1.0f, 1.0f } } ); + + std::array dynamicStates = { vk::DynamicState::eViewport, vk::DynamicState::eScissor }; + vk::PipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo( 
vk::PipelineDynamicStateCreateFlags(), + dynamicStates ); + + vk::GraphicsPipelineCreateInfo graphicsPipelineCreateInfo( vk::PipelineCreateFlags(), + pipelineShaderStageCreateInfos, + &pipelineVertexInputStateCreateInfo, + &pipelineInputAssemblyStateCreateInfo, + nullptr, + &pipelineViewportStateCreateInfo, + &pipelineRasterizationStateCreateInfo, + &pipelineMultisampleStateCreateInfo, + &pipelineDepthStencilStateCreateInfo, + &pipelineColorBlendStateCreateInfo, + &pipelineDynamicStateCreateInfo, + *pipelineLayout, + *renderPass ); + + return vk::raii::su::make_unique( device, pipelineCache, graphicsPipelineCreateInfo ); + } + + std::unique_ptr makeUniqueImage( vk::raii::Device const & device ) + { + vk::ImageCreateInfo imageCreateInfo( {}, + vk::ImageType::e2D, + vk::Format::eB8G8R8A8Unorm, + vk::Extent3D( 640, 640, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + vk::ImageTiling::eLinear, + vk::ImageUsageFlagBits::eTransferSrc ); + return vk::raii::su::make_unique( device, imageCreateInfo ); + } + + std::unique_ptr makeUniqueInstance( vk::raii::Context const & context, + std::string const & appName, + std::string const & engineName, + std::vector const & layers = {}, + std::vector const & extensions = {}, + uint32_t apiVersion = VK_API_VERSION_1_0 ) + { + vk::ApplicationInfo applicationInfo( appName.c_str(), 1, engineName.c_str(), 1, apiVersion ); + std::vector enabledLayers = + vk::su::gatherLayers( layers, context.enumerateInstanceLayerProperties() ); + std::vector enabledExtensions = + vk::su::gatherExtensions( extensions, context.enumerateInstanceExtensionProperties() ); +#if defined( NDEBUG ) + vk::StructureChain +#else + vk::StructureChain +#endif + instanceCreateInfoChain = + vk::su::makeInstanceCreateInfoChain( applicationInfo, enabledLayers, enabledExtensions ); + + return vk::raii::su::make_unique( context, + instanceCreateInfoChain.get() ); + } + + std::unique_ptr makeUniquePhysicalDevice( vk::raii::Instance const & instance ) + { + return 
vk::raii::su::make_unique( + std::move( vk::raii::PhysicalDevices( instance ).front() ) ); + } + + std::unique_ptr + makeUniquePipelineLayout( vk::raii::Device const & device, + vk::raii::DescriptorSetLayout const & descriptorSetLayout ) + { + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ); + return vk::raii::su::make_unique( device, pipelineLayoutCreateInfo ); + } + + std::unique_ptr + makeUniqueRenderPass( vk::raii::Device const & device, + vk::Format colorFormat, + vk::Format depthFormat, + vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear, + vk::ImageLayout colorFinalLayout = vk::ImageLayout::ePresentSrcKHR ) + { + std::vector attachmentDescriptions; + assert( colorFormat != vk::Format::eUndefined ); + attachmentDescriptions.emplace_back( vk::AttachmentDescriptionFlags(), + colorFormat, + vk::SampleCountFlagBits::e1, + loadOp, + vk::AttachmentStoreOp::eStore, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + colorFinalLayout ); + if ( depthFormat != vk::Format::eUndefined ) + { + attachmentDescriptions.emplace_back( vk::AttachmentDescriptionFlags(), + depthFormat, + vk::SampleCountFlagBits::e1, + loadOp, + vk::AttachmentStoreOp::eDontCare, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eDepthStencilAttachmentOptimal ); + } + vk::AttachmentReference colorAttachment( 0, vk::ImageLayout::eColorAttachmentOptimal ); + vk::AttachmentReference depthAttachment( 1, vk::ImageLayout::eDepthStencilAttachmentOptimal ); + vk::SubpassDescription subpassDescription( vk::SubpassDescriptionFlags(), + vk::PipelineBindPoint::eGraphics, + {}, + colorAttachment, + {}, + ( depthFormat != vk::Format::eUndefined ) ? 
&depthAttachment + : nullptr ); + vk::RenderPassCreateInfo renderPassCreateInfo( + vk::RenderPassCreateFlags(), attachmentDescriptions, subpassDescription ); + return vk::raii::su::make_unique( device, renderPassCreateInfo ); + } + + vk::Format pickDepthFormat( vk::raii::PhysicalDevice const & physicalDevice ) + { + std::vector candidates = { vk::Format::eD32Sfloat, + vk::Format::eD32SfloatS8Uint, + vk::Format::eD24UnormS8Uint }; + for ( vk::Format format : candidates ) + { + vk::FormatProperties props = physicalDevice.getFormatProperties( format ); + + if ( props.optimalTilingFeatures & vk::FormatFeatureFlagBits::eDepthStencilAttachment ) + { + return format; + } + } + throw std::runtime_error( "failed to find supported format!" ); + } + + void submitAndWait( vk::raii::Device const & device, + vk::raii::Queue const & queue, + vk::raii::CommandBuffer const & commandBuffer ) + { + vk::raii::Fence fence( device, vk::FenceCreateInfo() ); + queue.submit( vk::SubmitInfo( nullptr, nullptr, *commandBuffer ), *fence ); + while ( vk::Result::eTimeout == device.waitForFences( { *fence }, VK_TRUE, vk::su::FenceTimeout ) ) + ; + } + + void updateDescriptorSets( + vk::raii::Device const & device, + vk::raii::DescriptorSet const & descriptorSet, + std::vector> const & + bufferData, + vk::raii::su::TextureData const & textureData, + uint32_t bindingOffset = 0 ) + { + std::vector bufferInfos; + bufferInfos.reserve( bufferData.size() ); + + std::vector writeDescriptorSets; + writeDescriptorSets.reserve( bufferData.size() + 1 ); + uint32_t dstBinding = bindingOffset; + for ( auto const & bhd : bufferData ) + { + bufferInfos.emplace_back( *std::get<1>( bhd ), 0, VK_WHOLE_SIZE ); + vk::BufferView bufferView; + if ( std::get<2>( bhd ) ) + { + bufferView = **std::get<2>( bhd ); + } + writeDescriptorSets.emplace_back( *descriptorSet, + dstBinding++, + 0, + 1, + std::get<0>( bhd ), + nullptr, + &bufferInfos.back(), + std::get<2>( bhd ) ? 
&bufferView : nullptr ); + } + + vk::DescriptorImageInfo imageInfo( + **textureData.sampler, **textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + writeDescriptorSets.emplace_back( + *descriptorSet, dstBinding, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo, nullptr, nullptr ); + + device.updateDescriptorSets( writeDescriptorSets, nullptr ); + } + + void updateDescriptorSets( + vk::raii::Device const & device, + vk::raii::DescriptorSet const & descriptorSet, + std::vector> const & + bufferData, + std::vector const & textureData, + uint32_t bindingOffset = 0 ) + { + std::vector bufferInfos; + bufferInfos.reserve( bufferData.size() ); + + std::vector writeDescriptorSets; + writeDescriptorSets.reserve( bufferData.size() + ( textureData.empty() ? 0 : 1 ) ); + uint32_t dstBinding = bindingOffset; + for ( auto const & bhd : bufferData ) + { + bufferInfos.emplace_back( *std::get<1>( bhd ), 0, VK_WHOLE_SIZE ); + vk::BufferView bufferView; + if ( std::get<2>( bhd ) ) + { + bufferView = **std::get<2>( bhd ); + } + writeDescriptorSets.emplace_back( *descriptorSet, + dstBinding++, + 0, + 1, + std::get<0>( bhd ), + nullptr, + &bufferInfos.back(), + std::get<2>( bhd ) ? 
&bufferView : nullptr ); + } + + std::vector imageInfos; + if ( !textureData.empty() ) + { + imageInfos.reserve( textureData.size() ); + for ( auto const & thd : textureData ) + { + imageInfos.emplace_back( + **thd.sampler, **thd.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + } + writeDescriptorSets.emplace_back( *descriptorSet, + dstBinding, + 0, + vk::su::checked_cast( imageInfos.size() ), + vk::DescriptorType::eCombinedImageSampler, + imageInfos.data(), + nullptr, + nullptr ); + } + + device.updateDescriptorSets( writeDescriptorSets, nullptr ); + } + + } // namespace su + } // namespace raii +} // namespace vk diff --git a/README.md b/README.md index 3b11a78..1796603 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,10 @@ In some cases it might be necessary to move Vulkan-Hpp to a custom namespace. Th Vulkan-Hpp declares a class for all handles to ensure full type safety and to add support for member functions on handles. A member function has been added to a handle class for each function which accepts the corresponding handle as first parameter. Instead of `vkBindBufferMemory(device, ...)` one can write `device.bindBufferMemory(...)` or `vk::bindBufferMemory(device, ...)`. +### namespace vk::raii + +There is an additional header named vulkan_raii.hpp generated. That header holds raii-compliant wrapper classes for the handle types. That is, for e.g. the handle type VkInstance, there's a raii-compliant wrapper vk::raii::Instance. Please have a look at the samples using those classes in the directory RAII_Samples. + ### C/C++ Interop for Handles On 64-bit platforms Vulkan-Hpp supports implicit conversions between C++ Vulkan handles and C Vulkan handles. On 32-bit platforms all non-dispatchable handles are defined as `uint64_t`, thus preventing type-conversion checks at compile time which would catch assignments between incompatible handle types.. 
Due to that Vulkan-Hpp does not enable implicit conversion for 32-bit platforms by default and it is recommended to use a `static_cast` for the conversion like this: `VkDevice = static_cast(cppDevice)` to prevent converting some arbitrary int to a handle or vice versa by accident. If you're developing your code on a 64-bit platform, but want compile your code for a 32-bit platform without adding the explicit casts you can define `VULKAN_HPP_TYPESAFE_CONVERSION` to 1 in your build system or before including `vulkan.hpp`. On 64-bit platforms this define is set to 1 by default and can be set to 0 to disable implicit conversions. diff --git a/VulkanHppGenerator.cpp b/VulkanHppGenerator.cpp index 93a7f48..3554aa1 100644 --- a/VulkanHppGenerator.cpp +++ b/VulkanHppGenerator.cpp @@ -47,7 +47,9 @@ std::string createEnumValueName( std::string const & name, bool bitmask, std::string const & tag ); std::string createSuccessCode( std::string const & code, std::set const & tags ); -std::string determineCommandName( std::string const & vulkanCommandName, std::string const & firstArgumentType ); +std::string determineCommandName( std::string const & vulkanCommandName, + std::string const & argumentType, + std::set const & tags ); std::string determineNoDiscard( bool multiSuccessCodes, bool multiErrorCodes ); std::set determineSkippedParams( size_t returnParamIndex, std::map const & vectorParamIndices ); std::string extractTag( int line, std::string const & name, std::set const & tags ); @@ -56,8 +58,10 @@ std::map getAttributes( tinyxml2::XMLElement const * e template std::vector getChildElements( ElementContainer const * element ); std::string getEnumPostfix( std::string const & name, std::set const & tags, std::string & prefix ); +std::string namespacedType( std::string const & type ); std::string readTypePostfix( tinyxml2::XMLNode const * node ); std::string readTypePrefix( tinyxml2::XMLNode const * node ); +void replaceAll( std::string & str, std::string const & from, 
std::string const & to ); std::string replaceWithMap( std::string const & input, std::map replacements ); std::string startLowerCase( std::string const & input ); std::string startUpperCase( std::string const & input ); @@ -73,6 +77,14 @@ std::string trimEnd( std::string const & input ); std::string trimStars( std::string const & input ); void warn( bool condition, int line, std::string const & message ); +#if defined( NDEBUG ) +template +void unreferenced( T const & t ) +{ + t; +}; +#endif + const std::set ignoreLens = { "null-terminated", R"(latexmath:[\lceil{\mathit{rasterizationSamples} \over 32}\rceil])", "2*VK_UUID_SIZE", @@ -287,28 +299,50 @@ std::string createSuccessCode( std::string const & code, std::set c return "e" + toCamelCase( stripPostfix( stripPrefix( code, "VK_" ), tag ) ) + tag; } -std::string determineCommandName( std::string const & vulkanCommandName, std::string const & firstArgumentType ) +std::string determineCommandName( std::string const & vulkanCommandName, + std::string const & argumentType, + std::set const & tags ) { std::string commandName( startLowerCase( stripPrefix( vulkanCommandName, "vk" ) ) ); - std::string searchName = stripPrefix( firstArgumentType, "Vk" ); - size_t pos = commandName.find( searchName ); - if ( ( pos == std::string::npos ) && isupper( searchName[0] ) ) + + if ( !argumentType.empty() ) { - searchName[0] = static_cast( tolower( searchName[0] ) ); - pos = commandName.find( searchName ); - } - if ( pos != std::string::npos ) - { - commandName.erase( pos, searchName.length() ); - } - else if ( ( searchName == "commandBuffer" ) && beginsWith( commandName, "cmd" ) ) - { - commandName.erase( 0, 3 ); - pos = 0; - } - if ( ( pos == 0 ) && isupper( commandName[0] ) ) - { - commandName[0] = static_cast( tolower( commandName[0] ) ); + std::string searchName = stripPrefix( argumentType, "Vk" ); + std::string argumentTag = findTag( tags, argumentType ); + if ( !argumentTag.empty() ) + { + searchName = stripPostfix( 
searchName, argumentTag ); + } + size_t pos = commandName.find( searchName ); + if ( ( pos == std::string::npos ) && isupper( searchName[0] ) ) + { + searchName[0] = static_cast( tolower( searchName[0] ) ); + pos = commandName.find( searchName ); + } + if ( pos != std::string::npos ) + { + size_t len = searchName.length(); + if ( commandName.find( searchName + "s" ) == pos ) + { + // filter out any plural of the searchName as well! + ++len; + } + commandName.erase( pos, len ); + } + else if ( ( searchName == "commandBuffer" ) && beginsWith( commandName, "cmd" ) ) + { + commandName.erase( 0, 3 ); + pos = 0; + } + if ( ( pos == 0 ) && isupper( commandName[0] ) ) + { + commandName[0] = static_cast( tolower( commandName[0] ) ); + } + std::string commandTag = findTag( tags, commandName ); + if ( !argumentTag.empty() && ( argumentTag == commandTag ) ) + { + commandName = stripPostfix( commandName, argumentTag ); + } } return commandName; } @@ -463,6 +497,11 @@ std::pair, std::string> readModifiers( tinyxml2::XMLNod return std::make_pair( arraySizes, bitCount ); } +std::string namespacedType( std::string const & type ) +{ + return beginsWith( type, "Vk" ) ? 
( "VULKAN_HPP_NAMESPACE::" + stripPrefix( type, "Vk" ) ) : type; +} + std::string readTypePostfix( tinyxml2::XMLNode const * node ) { std::string postfix; @@ -483,6 +522,16 @@ std::string readTypePrefix( tinyxml2::XMLNode const * node ) return prefix; } +void replaceAll( std::string & str, std::string const & from, std::string const & to ) +{ + size_t pos = 0; + while ( ( pos = str.find( from, pos ) ) != std::string::npos ) + { + str.replace( pos, from.length(), to ); + pos += to.length(); // Handles case where 'to' is a substring of 'from' + } +} + std::string replaceWithMap( std::string const & input, std::map replacements ) { // This will match ${someVariable} and contain someVariable in match group 1 @@ -653,16 +702,19 @@ std::string toUpperCase( std::string const & name ) std::vector tokenize( std::string const & tokenString, std::string const & separator ) { std::vector tokens; - size_t start = 0, end; - do + if ( !tokenString.empty() ) { - end = tokenString.find( separator, start ); - if ( start != end ) + size_t start = 0, end; + do { - tokens.push_back( trim( tokenString.substr( start, end - start ) ) ); - } - start = end + separator.length(); - } while ( end != std::string::npos ); + end = tokenString.find( separator, start ); + if ( start != end ) + { + tokens.push_back( trim( tokenString.substr( start, end - start ) ) ); + } + start = end + separator.length(); + } while ( end != std::string::npos ); + } return tokens; } @@ -1063,6 +1115,7 @@ void VulkanHppGenerator::appendCall( std::string & str, void VulkanHppGenerator::appendCommand( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const { bool appendedFunction = false; @@ -1084,13 +1137,13 @@ void VulkanHppGenerator::appendCommand( std::string & str, if ( commandData.returnType == "VkResult" ) { // function returning a result but no fancy input have either standard or enhanced call - appendCommandStandardOrEnhanced( str, name, 
commandData, definition ); + appendCommandStandardOrEnhanced( str, name, commandData, initialSkipCount, definition ); appendedFunction = true; } else { // void functions and functions returning some value with no fancy input have just standard call - appendCommandStandard( str, name, commandData, definition ); + appendCommandStandard( str, name, commandData, initialSkipCount, definition ); appendedFunction = true; } } @@ -1098,7 +1151,7 @@ void VulkanHppGenerator::appendCommand( std::string & str, { // functions with some fancy input have both, standard and enhanced call appendCommandStandardAndEnhanced( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices ); + str, name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices ); appendedFunction = true; } } @@ -1115,14 +1168,14 @@ void VulkanHppGenerator::appendCommand( std::string & str, if ( commandData.returnType == "VkResult" ) { // provide standard, enhanced, and unique call - appendCommandUnique( str, name, commandData, nonConstPointerParamIndices[0], definition ); + appendCommandUnique( str, name, commandData, initialSkipCount, nonConstPointerParamIndices[0], definition ); appendedFunction = true; } else if ( ( commandData.returnType == "void" ) && beginsWith( name, "vkGet" ) ) { // it's a handle type, but without construction and destruction function; it's just get appendCommandStandardAndEnhanced( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices ); + str, name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices ); appendedFunction = true; } } @@ -1135,8 +1188,13 @@ void VulkanHppGenerator::appendCommand( std::string & str, ( vectorParamIndices.begin()->second == std::next( vectorParamIndices.begin() )->second ) ) { // provide standard, enhanced, vector, singular, and unique (and the combinations!) 
calls - appendCommandVectorSingularUnique( - str, name, commandData, vectorParamIndices, nonConstPointerParamIndices[0], definition ); + appendCommandVectorSingularUnique( str, + name, + commandData, + initialSkipCount, + vectorParamIndices, + nonConstPointerParamIndices[0], + definition ); appendedFunction = true; } } @@ -1145,8 +1203,13 @@ void VulkanHppGenerator::appendCommand( std::string & str, ( vectorParamIndices.size() == 1 ) ) { // provide standard, enhanced, vector, and unique (and the combinations!) calls - appendCommandVectorUnique( - str, name, commandData, vectorParamIndices, nonConstPointerParamIndices[0], definition ); + appendCommandVectorUnique( str, + name, + commandData, + initialSkipCount, + vectorParamIndices, + nonConstPointerParamIndices[0], + definition ); appendedFunction = true; } } @@ -1158,7 +1221,7 @@ void VulkanHppGenerator::appendCommand( std::string & str, { // provide standard, enhanced, and chained call appendCommandChained( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices[0] ); + str, name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices[0] ); appendedFunction = true; } } @@ -1170,7 +1233,7 @@ void VulkanHppGenerator::appendCommand( std::string & str, if ( ( commandData.returnType == "VkResult" ) || ( commandData.returnType == "void" ) ) { appendCommandStandardAndEnhanced( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices ); + str, name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices ); appendedFunction = true; } } @@ -1179,7 +1242,7 @@ void VulkanHppGenerator::appendCommand( std::string & str, { // provide standard, enhanced, and singular calls appendCommandSingular( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices[0] ); + str, name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices[0] ); 
appendedFunction = true; } } @@ -1194,7 +1257,7 @@ void VulkanHppGenerator::appendCommand( std::string & str, if ( ( commandData.returnType == "VkResult" ) || ( commandData.returnType == "void" ) ) { appendCommandVectorChained( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices ); + str, name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices ); appendedFunction = true; } } @@ -1209,8 +1272,13 @@ void VulkanHppGenerator::appendCommand( std::string & str, if ( ( commandData.returnType == "VkResult" ) && ( 1 < commandData.successCodes.size() ) ) { // two returns and a non-trivial success code -> need to return a complex ResultValue!! - appendCommandStandardAndEnhanced( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices ); + appendCommandStandardAndEnhanced( str, + name, + commandData, + initialSkipCount, + definition, + vectorParamIndices, + nonConstPointerParamIndices ); appendedFunction = true; } break; @@ -1226,8 +1294,13 @@ void VulkanHppGenerator::appendCommand( std::string & str, if ( ( commandData.returnType == "VkResult" ) || ( commandData.returnType == "void" ) ) { // provide standard, enhanced, and vector calls - appendCommandVector( - str, name, commandData, definition, *vectorParamIndexIt, nonConstPointerParamIndices ); + appendCommandVector( str, + name, + commandData, + initialSkipCount, + definition, + *vectorParamIndexIt, + nonConstPointerParamIndices ); appendedFunction = true; } } @@ -1242,8 +1315,13 @@ void VulkanHppGenerator::appendCommand( std::string & str, ( commandData.returnType == "VkResult" ) ) { // provide standard, enhanced deprecated, enhanced, and enhanced with allocator calls - appendCommandStandardEnhancedDeprecatedAllocator( - str, name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices ); + appendCommandStandardEnhancedDeprecatedAllocator( str, + name, + commandData, + initialSkipCount, + definition, 
+ vectorParamIndices, + nonConstPointerParamIndices ); appendedFunction = true; } } @@ -1274,7 +1352,7 @@ void VulkanHppGenerator::appendCommand( std::string & str, // both vectors, as well as the size parameter are non-const pointer that is output parameters // provide standard, enhanced, vector and deprecated calls! appendCommandVectorDeprecated( - str, name, commandData, vectorParamIndices, nonConstPointerParamIndices, definition ); + str, name, commandData, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, definition ); appendedFunction = true; } } @@ -1294,18 +1372,19 @@ void VulkanHppGenerator::appendCommand( std::string & str, aliasCommandData.extensions = ad.second.extensions; aliasCommandData.feature = ad.second.feature; aliasCommandData.xmlLine = ad.second.xmlLine; - appendCommand( str, ad.first, aliasCommandData, definition ); + appendCommand( str, ad.first, aliasCommandData, initialSkipCount, definition ); } } return; } - throw std::runtime_error( "Never encountered a function like " + name + " !" ); + throw std::runtime_error( "Never encountered a function like <" + name + "> !" ); } void VulkanHppGenerator::appendCommandChained( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t nonConstPointerIndex ) const @@ -1328,13 +1407,14 @@ ${leave})"; std::map( { { "commandEnhanced", ( commandData.returnType == "void" ) - ? constructCommandVoidGetValue( name, commandData, definition, vectorParamIndices, nonConstPointerIndex ) - : constructCommandResultGetValue( name, commandData, definition, nonConstPointerIndex ) }, + ? constructCommandVoidGetValue( + name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerIndex ) + : constructCommandResultGetValue( name, commandData, initialSkipCount, definition, nonConstPointerIndex ) }, { "commandEnhancedChained", ( commandData.returnType == "void" ) - ? 
constructCommandVoidGetChain( name, commandData, definition, nonConstPointerIndex ) - : constructCommandResultGetChain( name, commandData, definition, nonConstPointerIndex ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, + ? constructCommandVoidGetChain( name, commandData, initialSkipCount, definition, nonConstPointerIndex ) + : constructCommandResultGetChain( name, commandData, initialSkipCount, definition, nonConstPointerIndex ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, { "enter", enter }, { "leave", leave }, { "newlineOnDefinition", definition ? "\n" : "" } } ) ); @@ -1343,6 +1423,7 @@ ${leave})"; void VulkanHppGenerator::appendCommandSingular( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -1365,14 +1446,15 @@ ${leave})"; functionTemplate, std::map( { { "commandEnhanced", - constructCommandResultGetVector( name, commandData, definition, vectorParamIndices, returnParamIndex ) }, + constructCommandResultGetVector( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex ) }, { "commandEnhancedDeprecated", constructCommandResultGetVectorDeprecated( - name, commandData, definition, vectorParamIndices, returnParamIndex ) }, + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex ) }, { "commandEnhancedSingular", constructCommandResultGetVectorSingular( - name, commandData, definition, vectorParamIndices, returnParamIndex ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, { "enter", enter }, { "leave", leave }, { 
"newlineOnDefinition", definition ? "\n" : "" } } ) ); @@ -1381,6 +1463,7 @@ ${leave})"; void VulkanHppGenerator::appendCommandStandard( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const { const std::string functionTemplate = R"( @@ -1390,17 +1473,19 @@ ${leave})"; std::string enter, leave; std::tie( enter, leave ) = generateProtection( commandData.feature, commandData.extensions ); - str += replaceWithMap( functionTemplate, - std::map( - { { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave } } ) ); } void VulkanHppGenerator::appendCommandStandardAndEnhanced( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & nonConstPointerParamIndices ) const @@ -1421,69 +1506,74 @@ ${leave})"; case 0: if ( commandData.returnType == "void" ) { - commandEnhanced = constructCommandVoid( name, commandData, definition, vectorParamIndices ); + commandEnhanced = constructCommandVoid( name, commandData, initialSkipCount, definition, vectorParamIndices ); } else if ( commandData.returnType == "VkResult" ) { switch ( vectorParamIndices.size() ) { case 0: - case 1: commandEnhanced = constructCommandResult( name, commandData, definition, vectorParamIndices ); break; + case 1: + commandEnhanced = + constructCommandResult( name, commandData, initialSkipCount, definition, vectorParamIndices ); + break; case 2: if ( ( vectorParamIndices.begin()->second != INVALID_INDEX ) && ( vectorParamIndices.begin()->second == std::next( vectorParamIndices.begin() )->second ) && ( 
commandData.params[vectorParamIndices.begin()->second].type.isValue() ) ) { - commandEnhanced = - constructCommandResultGetTwoVectors( name, commandData, definition, vectorParamIndices ); + commandEnhanced = constructCommandResultGetTwoVectors( + name, commandData, initialSkipCount, definition, vectorParamIndices ); } break; } } else if ( vectorParamIndices.empty() ) { - commandEnhanced = constructCommandType( name, commandData, definition ); + commandEnhanced = constructCommandType( name, commandData, initialSkipCount, definition ); } break; case 1: if ( commandData.returnType == "void" ) { commandEnhanced = constructCommandVoidGetValue( - name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices[0] ); + name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices[0] ); } else if ( commandData.returnType == "VkResult" ) { - commandEnhanced = - constructCommandResultGetValue( name, commandData, definition, nonConstPointerParamIndices[0] ); + commandEnhanced = constructCommandResultGetValue( + name, commandData, initialSkipCount, definition, nonConstPointerParamIndices[0] ); } break; case 2: if ( ( commandData.returnType == "VkResult" ) && ( 1 < commandData.successCodes.size() ) ) { - commandEnhanced = - constructCommandResultGetTwoValues( name, commandData, definition, nonConstPointerParamIndices ); + commandEnhanced = constructCommandResultGetTwoValues( + name, commandData, initialSkipCount, definition, nonConstPointerParamIndices ); } break; } if ( commandEnhanced.empty() ) { - throw std::runtime_error( "Never encountered a function like " + name + " !" ); + throw std::runtime_error( "Never encountered a function like <" + name + "> !" ); } - str += replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", commandEnhanced }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave }, - { "newlineOnDefinition", definition ? 
"\n" : "" } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", commandEnhanced }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave }, + { "newlineOnDefinition", definition ? "\n" : "" } } ) ); } void VulkanHppGenerator::appendCommandStandardEnhancedDeprecatedAllocator( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & nonConstPointerParamIndices ) const @@ -1505,27 +1595,28 @@ ${leave})"; std::string enter, leave; std::tie( enter, leave ) = generateProtection( commandData.feature, commandData.extensions ); - str += - replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", - constructCommandResultGetVectorAndValue( - name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices, false ) }, - { "commandEnhancedDeprecated", - constructCommandResultGetValueDeprecated( - name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices[1] ) }, - { "commandEnhancedWithAllocator", - constructCommandResultGetVectorAndValue( - name, commandData, definition, vectorParamIndices, nonConstPointerParamIndices, true ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave }, - { "newlineOnDefinition", definition ? 
"\n" : "" } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", + constructCommandResultGetVectorAndValue( + name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices, false ) }, + { "commandEnhancedDeprecated", + constructCommandResultGetValueDeprecated( + name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices[1] ) }, + { "commandEnhancedWithAllocator", + constructCommandResultGetVectorAndValue( + name, commandData, initialSkipCount, definition, vectorParamIndices, nonConstPointerParamIndices, true ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave }, + { "newlineOnDefinition", definition ? "\n" : "" } } ) ); } void VulkanHppGenerator::appendCommandStandardOrEnhanced( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const { assert( commandData.returnType == "VkResult" ); @@ -1542,17 +1633,19 @@ ${leave} std::string enter, leave; std::tie( enter, leave ) = generateProtection( commandData.feature, commandData.extensions ); - str += replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", constructCommandResult( name, commandData, definition, {} ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", constructCommandResult( name, commandData, initialSkipCount, definition, {} ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave } } ) ); } void VulkanHppGenerator::appendCommandUnique( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t nonConstPointerIndex, 
bool definition ) const { @@ -1572,10 +1665,12 @@ ${leave})"; str += replaceWithMap( functionTemplate, std::map( - { { "commandEnhanced", constructCommandResultGetValue( name, commandData, definition, nonConstPointerIndex ) }, + { { "commandEnhanced", + constructCommandResultGetValue( name, commandData, initialSkipCount, definition, nonConstPointerIndex ) }, { "commandEnhancedUnique", - constructCommandResultGetHandleUnique( name, commandData, definition, nonConstPointerIndex ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, + constructCommandResultGetHandleUnique( + name, commandData, initialSkipCount, definition, nonConstPointerIndex ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, { "enter", enter }, { "leave", leave }, { "newlineOnDefinition", definition ? "\n" : "" } } ) ); @@ -1584,6 +1679,7 @@ ${leave})"; void VulkanHppGenerator::appendCommandVector( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices ) const @@ -1601,28 +1697,30 @@ ${commandEnhancedWithAllocators} #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ ${leave})"; - str += - replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", - ( commandData.returnType == "VkResult" ) - ? constructCommandResultEnumerate( name, commandData, definition, vectorParamIndex, false ) - : constructCommandVoidEnumerate( - name, commandData, definition, vectorParamIndex, returnParamIndices, false ) }, - { "commandEnhancedWithAllocators", - ( commandData.returnType == "VkResult" ) - ? 
constructCommandResultEnumerate( name, commandData, definition, vectorParamIndex, true ) - : constructCommandVoidEnumerate( - name, commandData, definition, vectorParamIndex, returnParamIndices, true ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave }, - { "newlineOnDefinition", definition ? "\n" : "" } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", + ( commandData.returnType == "VkResult" ) + ? constructCommandResultEnumerate( + name, commandData, initialSkipCount, definition, vectorParamIndex, false ) + : constructCommandVoidEnumerate( + name, commandData, initialSkipCount, definition, vectorParamIndex, returnParamIndices, false ) }, + { "commandEnhancedWithAllocators", + ( commandData.returnType == "VkResult" ) + ? constructCommandResultEnumerate( name, commandData, initialSkipCount, definition, vectorParamIndex, true ) + : constructCommandVoidEnumerate( + name, commandData, initialSkipCount, definition, vectorParamIndex, returnParamIndices, true ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave }, + { "newlineOnDefinition", definition ? "\n" : "" } } ) ); } void VulkanHppGenerator::appendCommandVectorChained( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & returnParamIndices ) const @@ -1648,27 +1746,54 @@ ${leave})"; std::map( { { "commandEnhanced", ( commandData.returnType == "VkResult" ) - ? constructCommandResultEnumerate( name, commandData, definition, *vectorParamIndices.begin(), false ) - : constructCommandVoidEnumerate( - name, commandData, definition, *vectorParamIndices.begin(), returnParamIndices, false ) }, + ? 
constructCommandResultEnumerate( + name, commandData, initialSkipCount, definition, *vectorParamIndices.begin(), false ) + : constructCommandVoidEnumerate( name, + commandData, + initialSkipCount, + definition, + *vectorParamIndices.begin(), + returnParamIndices, + false ) }, { "commandEnhancedChained", ( commandData.returnType == "VkResult" ) - ? constructCommandResultEnumerateChained( - name, commandData, definition, *vectorParamIndices.begin(), returnParamIndices, false ) - : constructCommandVoidEnumerateChained( - name, commandData, definition, *vectorParamIndices.begin(), returnParamIndices, false ) }, + ? constructCommandResultEnumerateChained( name, + commandData, + initialSkipCount, + definition, + *vectorParamIndices.begin(), + returnParamIndices, + false ) + : constructCommandVoidEnumerateChained( name, + commandData, + initialSkipCount, + definition, + *vectorParamIndices.begin(), + returnParamIndices, + false ) }, { "commandEnhancedChainedWithAllocator", ( commandData.returnType == "VkResult" ) ? constructCommandResultEnumerateChained( - name, commandData, definition, *vectorParamIndices.begin(), returnParamIndices, true ) - : constructCommandVoidEnumerateChained( - name, commandData, definition, *vectorParamIndices.begin(), returnParamIndices, true ) }, + name, commandData, initialSkipCount, definition, *vectorParamIndices.begin(), returnParamIndices, true ) + : constructCommandVoidEnumerateChained( name, + commandData, + initialSkipCount, + definition, + *vectorParamIndices.begin(), + returnParamIndices, + true ) }, { "commandEnhancedWithAllocator", ( commandData.returnType == "VkResult" ) - ? constructCommandResultEnumerate( name, commandData, definition, *vectorParamIndices.begin(), true ) - : constructCommandVoidEnumerate( - name, commandData, definition, *vectorParamIndices.begin(), returnParamIndices, true ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, + ? 
constructCommandResultEnumerate( + name, commandData, initialSkipCount, definition, *vectorParamIndices.begin(), true ) + : constructCommandVoidEnumerate( name, + commandData, + initialSkipCount, + definition, + *vectorParamIndices.begin(), + returnParamIndices, + true ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, { "enter", enter }, { "leave", leave }, { "newlineOnDefinition", definition ? "\n" : "" } } ) ); @@ -1677,6 +1802,7 @@ ${leave})"; void VulkanHppGenerator::appendCommandVectorDeprecated( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, std::map const & vectorParamIndices, std::vector const & returnParamIndices, bool definition ) const @@ -1698,27 +1824,29 @@ ${commandEnhancedWithAllocators} #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ )"; - str += replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", - constructCommandResultEnumerateTwoVectors( - name, commandData, definition, vectorParamIndices, returnParamIndices, false ) }, - { "commandEnhancedDeprecated", - constructCommandResultEnumerateTwoVectorsDeprecated( - name, commandData, definition, vectorParamIndices, false ) }, - { "commandEnhancedWithAllocators", - constructCommandResultEnumerateTwoVectors( - name, commandData, definition, vectorParamIndices, returnParamIndices, true ) }, - { "commandEnhancedWithAllocatorsDeprecated", - constructCommandResultEnumerateTwoVectorsDeprecated( - name, commandData, definition, vectorParamIndices, true ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "newlineOnDefinition", definition ? 
"\n" : "" } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", + constructCommandResultEnumerateTwoVectors( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndices, false ) }, + { "commandEnhancedDeprecated", + constructCommandResultEnumerateTwoVectorsDeprecated( + name, commandData, initialSkipCount, definition, vectorParamIndices, false ) }, + { "commandEnhancedWithAllocators", + constructCommandResultEnumerateTwoVectors( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndices, true ) }, + { "commandEnhancedWithAllocatorsDeprecated", + constructCommandResultEnumerateTwoVectorsDeprecated( + name, commandData, initialSkipCount, definition, vectorParamIndices, true ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "newlineOnDefinition", definition ? "\n" : "" } } ) ); } void VulkanHppGenerator::appendCommandVectorSingularUnique( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, std::map const & vectorParamIndices, size_t returnParamIndex, bool definition ) const @@ -1742,35 +1870,37 @@ ${leave})"; std::string enter, leave; std::tie( enter, leave ) = generateProtection( commandData.feature, commandData.extensions ); - str += replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", - constructCommandResultGetVectorOfHandles( - name, commandData, definition, vectorParamIndices, returnParamIndex, false ) }, - { "commandEnhancedSingular", - constructCommandResultGetVectorOfHandlesSingular( - name, commandData, definition, vectorParamIndices, returnParamIndex ) }, - { "commandEnhancedUnique", - constructCommandResultGetVectorOfHandlesUnique( - name, commandData, definition, vectorParamIndices, returnParamIndex, false ) }, - { "commandEnhancedUniqueSingular", - constructCommandResultGetVectorOfHandlesUniqueSingular( - name, commandData, 
definition, vectorParamIndices, returnParamIndex ) }, - { "commandEnhancedUniqueWithAllocators", - constructCommandResultGetVectorOfHandlesUnique( - name, commandData, definition, vectorParamIndices, returnParamIndex, true ) }, - { "commandEnhancedWithAllocators", - constructCommandResultGetVectorOfHandles( - name, commandData, definition, vectorParamIndices, returnParamIndex, true ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave }, - { "newlineOnDefinition", definition ? "\n" : "" } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", + constructCommandResultGetVectorOfHandles( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, false ) }, + { "commandEnhancedSingular", + constructCommandResultGetVectorOfHandlesSingular( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex ) }, + { "commandEnhancedUnique", + constructCommandResultGetVectorOfHandlesUnique( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, false ) }, + { "commandEnhancedUniqueSingular", + constructCommandResultGetVectorOfHandlesUniqueSingular( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex ) }, + { "commandEnhancedUniqueWithAllocators", + constructCommandResultGetVectorOfHandlesUnique( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, true ) }, + { "commandEnhancedWithAllocators", + constructCommandResultGetVectorOfHandles( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, true ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave }, + { "newlineOnDefinition", definition ? 
"\n" : "" } } ) ); } void VulkanHppGenerator::appendCommandVectorUnique( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, std::map const & vectorParamIndices, size_t returnParamIndex, bool definition ) const @@ -1792,24 +1922,25 @@ ${leave})"; std::string enter, leave; std::tie( enter, leave ) = generateProtection( commandData.feature, commandData.extensions ); - str += replaceWithMap( functionTemplate, - std::map( - { { "commandEnhanced", - constructCommandResultGetVectorOfHandles( - name, commandData, definition, vectorParamIndices, returnParamIndex, false ) }, - { "commandEnhancedUnique", - constructCommandResultGetVectorOfHandlesUnique( - name, commandData, definition, vectorParamIndices, returnParamIndex, false ) }, - { "commandEnhancedUniqueWithAllocators", - constructCommandResultGetVectorOfHandlesUnique( - name, commandData, definition, vectorParamIndices, returnParamIndex, true ) }, - { "commandEnhancedWithAllocators", - constructCommandResultGetVectorOfHandles( - name, commandData, definition, vectorParamIndices, returnParamIndex, true ) }, - { "commandStandard", constructCommandStandard( name, commandData, definition ) }, - { "enter", enter }, - { "leave", leave }, - { "newlineOnDefinition", definition ? 
"\n" : "" } } ) ); + str += replaceWithMap( + functionTemplate, + std::map( + { { "commandEnhanced", + constructCommandResultGetVectorOfHandles( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, false ) }, + { "commandEnhancedUnique", + constructCommandResultGetVectorOfHandlesUnique( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, false ) }, + { "commandEnhancedUniqueWithAllocators", + constructCommandResultGetVectorOfHandlesUnique( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, true ) }, + { "commandEnhancedWithAllocators", + constructCommandResultGetVectorOfHandles( + name, commandData, initialSkipCount, definition, vectorParamIndices, returnParamIndex, true ) }, + { "commandStandard", constructCommandStandard( name, commandData, initialSkipCount, definition ) }, + { "enter", enter }, + { "leave", leave }, + { "newlineOnDefinition", definition ? "\n" : "" } } ) ); } void VulkanHppGenerator::appendDispatchLoaderDynamic( std::string & str ) @@ -1936,6 +2067,7 @@ void VulkanHppGenerator::appendDispatchLoaderDynamic( std::string & str ) str += R"( public: DispatchLoaderDynamic() VULKAN_HPP_NOEXCEPT = default; + DispatchLoaderDynamic( DispatchLoaderDynamic const & rhs ) VULKAN_HPP_NOEXCEPT = default; #if !defined(VK_NO_PROTOTYPES) // This interface is designed to be used for per-device function pointers in combination with a linked vulkan library. 
@@ -2415,6 +2547,7 @@ void VulkanHppGenerator::appendFunctionBodyEnhancedMultiVectorSizeCheck( std::string const & indentation, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t returnParamIndex, std::map const & vectorParamIndices ) const { @@ -2430,7 +2563,7 @@ ${i} } )#"; // add some error checks if multiple vectors need to have the same size - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); for ( std::map::const_iterator it0 = vectorParamIndices.begin(); it0 != vectorParamIndices.end(); ++it0 ) { @@ -2445,7 +2578,7 @@ ${i} } std::map( { { "firstVectorName", startLowerCase( stripPrefix( commandData.params[it0->first].name, "p" ) ) }, { "secondVectorName", startLowerCase( stripPrefix( commandData.params[it1->first].name, "p" ) ) }, - { "className", commandData.handle }, + { "className", commandData.params[initialSkipCount - 1].type.type }, { "commandName", commandName }, { "i", indentation } } ) ); } @@ -2459,6 +2592,7 @@ void VulkanHppGenerator::appendFunctionBodyEnhancedReturnResultValue( std::strin std::string const & returnName, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t returnParamIndex, bool twoStep ) const { @@ -2466,7 +2600,7 @@ void VulkanHppGenerator::appendFunctionBodyEnhancedReturnResultValue( std::strin std::string returnVectorName = ( returnParamIndex != INVALID_INDEX ) ? 
stripPostfix( stripPrefix( commandData.params[returnParamIndex].name, "p" ), "s" ) : ""; - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); assert( commandData.returnType != "void" ); @@ -2660,7 +2794,7 @@ void VulkanHppGenerator::appendHandle( std::string & str, std::pairfirst, commandIt->second, false ); + appendCommand( str, commandIt->first, commandIt->second, 0, false ); } } else @@ -2684,9 +2818,9 @@ void VulkanHppGenerator::appendHandle( std::string & str, std::pairfirst, commandIt->second.params[0].type.type ); + std::string commandName = determineCommandName( commandIt->first, commandIt->second.params[0].type.type, m_tags ); commands += "\n"; - appendCommand( commands, commandIt->first, commandIt->second, false ); + appendCommand( commands, commandIt->first, commandIt->second, 1, false ); // special handling for destroy functions if ( ( ( commandIt->first.substr( 2, 7 ) == "Destroy" ) && ( commandName != "destroy" ) ) || @@ -2702,7 +2836,11 @@ void VulkanHppGenerator::appendHandle( std::string & str, std::pairfirst, commandData, false ); + assert( ( 1 < commandData.params.size() ) && ( commandData.params[0].type.type == handleData.first ) ); + commandData.params[1].optional = + false; // make sure, the object to destroy/free/release is not optional in the shortened version! 
+ + appendCommand( destroyCommandString, commandIt->first, commandData, 1, false ); std::string shortenedName; if ( commandIt->first.substr( 2, 7 ) == "Destroy" ) { @@ -2891,7 +3029,6 @@ ${CppTypeFromDebugReportObjectTypeEXT} void VulkanHppGenerator::appendHandles( std::string & str ) { - std::set listedHandles; for ( auto const & handle : m_handles ) { if ( m_listedTypes.find( handle.first ) == m_listedTypes.end() ) @@ -2916,10 +3053,10 @@ void VulkanHppGenerator::appendHandlesCommandDefinitions( std::string & str ) co std::string strippedName = startLowerCase( stripPrefix( commandIt->first, "vk" ) ); str += "\n"; - appendCommand( str, commandIt->first, commandIt->second, true ); + appendCommand( str, commandIt->first, commandIt->second, handle.first.empty() ? 0 : 1, true ); // special handling for destroy functions - std::string commandName = determineCommandName( commandIt->first, commandIt->second.params[0].type.type ); + std::string commandName = determineCommandName( commandIt->first, commandIt->second.params[0].type.type, m_tags ); if ( ( ( commandIt->first.substr( 2, 7 ) == "Destroy" ) && ( commandName != "destroy" ) ) || ( commandIt->first.substr( 2, 4 ) == "Free" ) || ( commandIt->first == "vkReleasePerformanceConfigurationINTEL" ) ) @@ -2928,15 +3065,17 @@ void VulkanHppGenerator::appendHandlesCommandDefinitions( std::string & str ) co // in case there are aliases to this function, filter them out here CommandData commandData = commandIt->second; commandData.aliasData.clear(); - bool complex = needsComplexBody( commandIt->second ); if ( complex ) { commandData.extensions.clear(); commandData.feature.clear(); } + assert( ( 1 < commandData.params.size() ) && ( commandData.params[0].type.type == handle.first ) ); + commandData.params[1].optional = + false; // make sure, the object to destroy/free/release is not optional in the shortened version! 
- appendCommand( destroyCommandString, commandIt->first, commandData, true ); + appendCommand( destroyCommandString, commandIt->first, commandData, handle.first.empty() ? 0 : 1, true ); std::string shortenedName; if ( commandIt->first.substr( 2, 7 ) == "Destroy" ) { @@ -3008,11 +3147,6 @@ void VulkanHppGenerator::appendHandlesCommandDefinitions( std::string & str ) co void VulkanHppGenerator::appendHashStructures( std::string & str ) const { - str += - "\n" - "namespace std\n" - "{\n"; - const std::string hashTemplate = R"( template <> struct hash { std::size_t operator()(VULKAN_HPP_NAMESPACE::${type} const& ${name}) const VULKAN_HPP_NOEXCEPT @@ -3035,8 +3169,45 @@ void VulkanHppGenerator::appendHashStructures( std::string & str ) const str += leave; } } +} - str += "}\n"; +void VulkanHppGenerator::appendRAIIHandles( std::string & str, std::string & commandDefinitions ) +{ + // Enum -> Type translations are always able to occur. + str += "\n"; + + // filtering out functions that are not usefull on this level of abstraction (like vkGetInstanceProcAddr) + // and all the construction and destruction functions, as they are used differently + + std::set specialFunctions; + for ( auto & handle : m_handles ) + { + if ( !handle.first.empty() ) + { + handle.second.destructorIt = determineRAIIHandleDestructor( handle.first ); + handle.second.constructorIts = determineRAIIHandleConstructors( handle.first, handle.second.destructorIt ); + if ( handle.second.destructorIt != m_commands.end() ) + { + specialFunctions.insert( handle.second.destructorIt->first ); + } + for ( auto const & constructorIt : handle.second.constructorIts ) + { + specialFunctions.insert( constructorIt->first ); + } + } + } + + distributeSecondLevelCommands( specialFunctions ); + renameFunctionParameters(); + + std::set listedHandles; + auto handleIt = m_handles.begin(); + assert( handleIt->first.empty() ); + appendRAIIHandleContext( str, commandDefinitions, *handleIt, specialFunctions ); + for ( 
++handleIt; handleIt != m_handles.end(); ++handleIt ) + { + appendRAIIHandle( str, commandDefinitions, *handleIt, listedHandles, specialFunctions ); + } } // Intended only for `enum class Result`! @@ -3073,6 +3244,201 @@ ${leave})"; } } +void VulkanHppGenerator::appendRAIIHandle( std::string & str, + std::string & commandDefinitions, + std::pair const & handle, + std::set & listedHandles, + std::set const & specialFunctions ) const +{ + if ( listedHandles.find( handle.first ) == listedHandles.end() ) + { + rescheduleRAIIHandle( str, commandDefinitions, handle, listedHandles, specialFunctions ); + + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( handle.first, !handle.second.alias.empty() ); + std::string handleType = stripPrefix( handle.first, "Vk" ); + std::string handleName = startLowerCase( handleType ); + + std::string singularConstructors, arrayConstructors; + std::tie( singularConstructors, arrayConstructors ) = constructRAIIHandleConstructors( handle ); + std::string upgradeConstructor = arrayConstructors.empty() ? "" : constructRAIIHandleUpgradeConstructor( handle ); + std::string destructor, destructorCall; + std::tie( destructor, destructorCall ) = + ( handle.second.destructorIt == m_commands.end() ) + ? 
std::make_pair( "", "" ) + : constructRAIIHandleDestructor( handle.first, handle.second.destructorIt, enter ); + + std::string getConstructorSuccessCode, memberVariables, moveConstructorInitializerList, moveAssignmentInstructions; + std::tie( getConstructorSuccessCode, memberVariables, moveConstructorInitializerList, moveAssignmentInstructions ) = + constructRAIIHandleDetails( handle, destructorCall ); + + std::string declarations, definitions; + std::tie( declarations, definitions ) = constructRAIIHandleMemberFunctions( handle, specialFunctions ); + commandDefinitions += definitions; + + assert( !handle.second.objTypeEnum.empty() ); + auto enumIt = m_enums.find( "VkObjectType" ); + assert( enumIt != m_enums.end() ); + auto valueIt = + std::find_if( enumIt->second.values.begin(), enumIt->second.values.end(), [&handle]( EnumValueData const & evd ) { + return evd.vulkanValue == handle.second.objTypeEnum; + } ); + assert( valueIt != enumIt->second.values.end() ); + std::string objTypeEnum = valueIt->vkValue; + + enumIt = m_enums.find( "VkDebugReportObjectTypeEXT" ); + assert( enumIt != m_enums.end() ); + valueIt = std::find_if( enumIt->second.values.begin(), + enumIt->second.values.end(), + [&handleType]( EnumValueData const & evd ) { return evd.vkValue == "e" + handleType; } ); + std::string debugReportObjectType = ( valueIt != enumIt->second.values.end() ) ? 
valueIt->vkValue : "eUnknown"; + + const std::string handleTemplate = R"(${enter} class ${handleType} + { + public: + using CType = Vk${handleType}; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::${objTypeEnum}; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::${debugReportObjectType}; + + public: +${singularConstructors} +${upgradeConstructor} +${destructor} + + ${handleType}() = delete; + ${handleType}( ${handleType} const & ) = delete; + ${handleType}( ${handleType} && rhs ) + : ${moveConstructorInitializerList} + {} + ${handleType} & operator=( ${handleType} const & ) = delete; + ${handleType} & operator=( ${handleType} && rhs ) + { + if ( this != &rhs ) + { +${moveAssignmentInstructions} + } + return *this; + } +${memberFunctionsDeclarations} + + VULKAN_HPP_NAMESPACE::${handleType} const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_${handleName}; + } + +${getConstructorSuccessCode} + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return ${getDispatcherReturn}m_dispatcher; + } + + private: + ${memberVariables} + }; + +${leave})"; + + str += replaceWithMap( + handleTemplate, + { { "debugReportObjectType", debugReportObjectType }, + { "destructor", destructor }, + { "enter", enter }, + { "getConstructorSuccessCode", getConstructorSuccessCode }, + { "getDispatcherReturn", ( handleType == "Device" ) || ( handleType == "Instance" ) ? 
"&" : "" }, + { "handleName", handleName }, + { "handleType", handleType }, + { "leave", leave }, + { "memberFunctionsDeclarations", declarations }, + { "memberVariables", memberVariables }, + { "moveAssignmentInstructions", moveAssignmentInstructions }, + { "moveConstructorInitializerList", moveConstructorInitializerList }, + { "objTypeEnum", objTypeEnum }, + { "singularConstructors", singularConstructors }, + { "upgradeConstructor", upgradeConstructor } } ); + + if ( !arrayConstructors.empty() ) + { + // it's a handle class with a friendly handles class + const std::string handlesTemplate = R"( +${enter} class ${handleType}s : public std::vector + { + public: + ${arrayConstructors} + + ${handleType}s() = delete; + ${handleType}s( ${handleType}s const & ) = delete; + ${handleType}s( ${handleType}s && rhs ) = default; + ${handleType}s & operator=( ${handleType}s const & ) = delete; + ${handleType}s & operator=( ${handleType}s && rhs ) = default; + }; +${leave} +)"; + + str += replaceWithMap( handlesTemplate, + { { "arrayConstructors", arrayConstructors }, + { "enter", enter }, + { "handleType", handleType }, + { "leave", leave } } ); + } + } +} + +void VulkanHppGenerator::appendRAIIHandleContext( std::string & str, + std::string & commandDefinitions, + std::pair const & handle, + std::set const & specialFunctions ) const +{ + assert( commandDefinitions.empty() ); + + const std::string contextTemplate = R"( + class Context + { + public: + Context() + { + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = m_dynamicLoader.getProcAddress( "vkGetInstanceProcAddr" ); + m_dispatcher.init( vkGetInstanceProcAddr ); + } + + ~Context() = default; + + Context( Context const & ) = delete; + Context( Context && rhs ) + : m_dynamicLoader( std::move( rhs.m_dynamicLoader ) ) + , m_dispatcher( std::move( rhs.m_dispatcher ) ) + {} + Context & operator=( Context const & ) = delete; + Context & operator=( Context && rhs ) + { + if ( this != &rhs ) + { + m_dynamicLoader = std::move( 
rhs.m_dynamicLoader ); + m_dispatcher = std::move( rhs.m_dispatcher ); + } + return *this; + } + +${memberFunctionDeclarations} + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return &m_dispatcher; + } + + private: + vk::DynamicLoader m_dynamicLoader; + VULKAN_HPP_RAII_DISPATCHER_TYPE m_dispatcher; + }; + +)"; + + std::string declarations; + std::tie( declarations, commandDefinitions ) = constructRAIIHandleMemberFunctions( handle, specialFunctions ); + + str += replaceWithMap( contextTemplate, { { "memberFunctionDeclarations", declarations } } ); +} + void VulkanHppGenerator::appendStruct( std::string & str, std::pair const & structure ) { assert( m_listingTypes.find( structure.first ) == m_listingTypes.end() ); @@ -3179,25 +3545,66 @@ void VulkanHppGenerator::appendStructCompareOperators( std::string & { { "name", stripPrefix( structData.first, "Vk" ) }, { "compareMembers", compareMembers } } ); } +bool VulkanHppGenerator::checkEquivalentSingularConstructor( + std::vector::const_iterator> const & constructorIts, + std::map::const_iterator constructorIt, + std::vector::const_iterator lenIt ) const +{ + // check, if there is no singular constructor with the very same arguments as this array constructor + // (besides the size, of course) + auto singularCommandIt = + std::find_if( constructorIts.begin(), + constructorIts.end(), + [constructorIt, lenIt]( std::map::const_iterator it ) { + if ( it->second.params.size() + 1 != constructorIt->second.params.size() ) + { + return false; + } + size_t lenIdx = std::distance( constructorIt->second.params.begin(), lenIt ); + for ( size_t i = 0, j = 0; i < it->second.params.size(); ++i, ++j ) + { + assert( j < constructorIt->second.params.size() ); + if ( j == lenIdx ) + { + ++j; + } + if ( it->second.params[i].type.type != constructorIt->second.params[j].type.type ) + { + return false; + } + } + return true; + } ); + return ( singularCommandIt != constructorIts.end() ); +} + std::string 
VulkanHppGenerator::constructArgumentListEnhanced( std::vector const & params, std::set const & skippedParams, size_t singularParam, bool definition, bool withAllocators, - bool structureChain ) const + bool structureChain, + bool withDispatcher ) const { size_t defaultStartIndex = withAllocators ? ~0 : determineDefaultStartIndex( params, skippedParams ); std::string argumentList; + bool encounteredArgument = false; for ( size_t i = 0; i < params.size(); ++i ) { if ( skippedParams.find( i ) == skippedParams.end() ) { + if ( encounteredArgument ) + { + argumentList += ", "; + } bool hasDefaultAssignment = false; if ( i == singularParam ) { + assert( !params[i].optional ); assert( params[i].type.isConstPointer() && !params[i].len.empty() && !isLenByStructMember( params[i].len, params ) && beginsWith( params[i].type.type, "Vk" ) ); + assert( !isHandleType( params[i].type.type ) ); argumentList += "const VULKAN_HPP_NAMESPACE::" + stripPrefix( params[i].type.type, "Vk" ) + " & " + stripPluralS( startLowerCase( stripPrefix( params[i].name, "p" ) ) ); } @@ -3206,10 +3613,12 @@ std::string VulkanHppGenerator::constructArgumentListEnhanced( std::vector const & " + name; if ( params[i].optional && !definition ) { @@ -3264,6 +3674,7 @@ std::string VulkanHppGenerator::constructArgumentListEnhanced( std::vector const & params, - bool nonConstPointerAsNullptr, - size_t singularParamIndex ) const +std::string VulkanHppGenerator::constructCallArgumentEnhanced( ParamData const & param, + std::vector const & params, + bool nonConstPointerAsNullptr, + size_t singularParamIndex, + bool raiiHandleMemberFunction ) const { + std::string argument; + if ( param.type.isConstPointer() || ( specialPointerTypes.find( param.type.type ) != specialPointerTypes.end() ) ) + { + std::string name = startLowerCase( stripPrefix( param.name, "p" ) ); + if ( isHandleType( param.type.type ) && param.type.isValue() ) + { + assert( !param.optional ); + // if at all, this is the first argument, and it's the 
implicitly provided member handle + assert( param.name == params[0].name ); + assert( param.arraySizes.empty() && param.len.empty() ); + argument += "m_" + startLowerCase( stripPrefix( param.type.type, "Vk" ) ); + } + else if ( param.len.empty() ) + { + // this const-pointer parameter has no length, that is it's a const-pointer to a single value + if ( param.type.type == "void" ) + { + assert( !param.optional ); + // use the original name here, as void-pointer are not mapped to some reference + argument = param.name; + } + else if ( param.optional ) + { + argument = "static_cast<" + param.type.compose() + ">( " + name + " )"; + } + else + { + argument = "&" + name; + } + if ( beginsWith( param.type.type, "Vk" ) ) + { + argument = "reinterpret_cast( " + argument + " )"; + } + } + else if ( param.len == "null-terminated" ) + { + // this const-pointer parameter is "null-terminated", that is it's a string + assert( ( param.type.type == "char" ) && param.arraySizes.empty() ); + if ( param.optional ) + { + argument = name + " ? 
" + name + "->c_str() : nullptr"; + } + else + { + argument = name + ".c_str()"; + } + } + else + { + // this const-pointer parameter has some explicit length + if ( ( singularParamIndex != INVALID_INDEX ) && ( params[singularParamIndex].len == param.len ) ) + { + assert( !param.optional ); + argument = "&" + stripPluralS( name ); + } + else + { + // this const-parameter is represented by some array, where data() also works with no data (optional) + argument = name + ".data()"; + } + if ( beginsWith( param.type.type, "Vk" ) || ( param.type.type == "void" ) ) + { + argument = "reinterpret_cast<" + param.type.prefix + " " + param.type.type + " " + param.type.postfix + ">( " + + argument + " )"; + } + } + } + else if ( param.type.isNonConstPointer() && + ( specialPointerTypes.find( param.type.type ) == specialPointerTypes.end() ) ) + { + // parameter is a non-const pointer (and none of the special pointer types, that are considered const-pointers, even + // though they are not!) + assert( beginsWith( param.name, "p" ) ); + std::string name = startLowerCase( stripPrefix( param.name, "p" ) ); + if ( param.len.empty() ) + { + assert( param.arraySizes.empty() && !param.optional ); + if ( beginsWith( param.type.type, "Vk" ) ) + { + argument = "reinterpret_cast<" + param.type.type + " *>( &" + name + " )"; + } + else + { + argument = "&" + name; + } + } + else + { + // the non-const pointer has a len -> it will be represented by some array + assert( param.arraySizes.empty() ); + if ( nonConstPointerAsNullptr ) + { + argument = "nullptr"; + } + else if ( beginsWith( param.type.type, "Vk" ) || ( param.type.type == "void" ) ) + { + if ( ( singularParamIndex != INVALID_INDEX ) && ( params[singularParamIndex].name == param.name ) ) + { + assert( !param.optional ); + argument = "&" + stripPluralS( name ); + } + else + { + // get the data of the array, which also covers no data -> no need to look at param.optional + argument = name + ".data()"; + } + if ( !raiiHandleMemberFunction 
|| !isHandleType( param.type.type ) ) + { + argument = "reinterpret_cast<" + param.type.type + " *>( " + argument + " )"; + } + } + else + { + assert( !param.optional ); + argument = name + ".data()"; + } + } + } + else + { + assert( param.len.empty() ); + if ( beginsWith( param.type.type, "Vk" ) ) + { + if ( param.arraySizes.empty() ) + { + auto pointerIt = std::find_if( + params.begin(), params.end(), [¶m]( ParamData const & pd ) { return pd.len == param.name; } ); + if ( pointerIt != params.end() ) + { + assert( !param.optional ); + argument = startLowerCase( stripPrefix( pointerIt->name, "p" ) ) + ".size()"; + if ( pointerIt->type.type == "void" ) + { + argument += " * sizeof( T )"; + } + } + else + { + argument = "static_cast<" + param.type.type + ">( " + param.name + " )"; + } + } + else + { + assert( !param.optional ); + assert( param.arraySizes.size() == 1 ); + assert( param.type.prefix == "const" ); + argument = "reinterpret_cast( " + param.name + " )"; + } + } + else + { + if ( ( singularParamIndex != INVALID_INDEX ) && ( params[singularParamIndex].len == param.name ) ) + { + assert( !param.optional ); + assert( param.arraySizes.empty() ); + assert( ( param.type.type == "size_t" ) || ( param.type.type == "uint32_t" ) ); + if ( params[singularParamIndex].type.type == "void" ) + { + argument = "sizeof( T )"; + } + else + { + argument = "1"; + } + } + else + { + auto pointerIt = std::find_if( + params.begin(), params.end(), [¶m]( ParamData const & pd ) { return pd.len == param.name; } ); + if ( pointerIt != params.end() ) + { + // this parameter is the len of some other -> replace it with that parameter's size + assert( param.arraySizes.empty() ); + assert( ( param.type.type == "size_t" ) || ( param.type.type == "uint32_t" ) ); + argument = startLowerCase( stripPrefix( pointerIt->name, "p" ) ) + ".size()"; + if ( pointerIt->type.type == "void" ) + { + argument += " * sizeof( T )"; + } + } + else + { + assert( !param.optional ); + assert( 
param.arraySizes.size() <= 1 ); + argument = param.name; + } + } + } + } + assert( !argument.empty() ); + return argument; +} + +std::string VulkanHppGenerator::constructCallArgumentsEnhanced( std::vector const & params, + size_t initialSkipCount, + bool nonConstPointerAsNullptr, + size_t singularParamIndex, + bool raiiHandleMemberFunction ) const +{ + assert( initialSkipCount <= params.size() ); std::string arguments; bool encounteredArgument = false; - for ( auto const & param : params ) + for ( size_t i = 0; i < initialSkipCount; ++i ) { if ( encounteredArgument ) { arguments += ", "; } - if ( ( param.type.type == handle ) && param.type.isValue() ) + assert( isHandleType( params[i].type.type ) && params[i].type.isValue() ); + assert( params[i].arraySizes.empty() && params[i].len.empty() ); + std::string argument = "m_" + startLowerCase( stripPrefix( params[i].type.type, "Vk" ) ); + if ( raiiHandleMemberFunction ) { - // if at all, this is the first argument, and it's the implicitly provided member handle - assert( param.name == params[0].name ); - assert( param.arraySizes.empty() && param.len.empty() ); - arguments += "m_" + startLowerCase( stripPrefix( param.type.type, "Vk" ) ); + argument = "static_cast<" + params[i].type.type + ">( " + argument + " )"; } - else if ( param.type.isConstPointer() || - ( specialPointerTypes.find( param.type.type ) != specialPointerTypes.end() ) ) + arguments += argument; + encounteredArgument = true; + } + for ( size_t i = initialSkipCount; i < params.size(); ++i ) + { + if ( encounteredArgument ) { - std::string name = startLowerCase( stripPrefix( param.name, "p" ) ); - if ( param.len.empty() ) - { - assert( param.arraySizes.empty() ); - if ( beginsWith( param.type.type, "Vk" ) ) - { - if ( param.optional ) - { - name = "static_cast<" + param.type.compose() + ">( " + name + " )"; - } - else - { - name = "&" + name; - } - arguments += "reinterpret_cast( " + name + " )"; - } - else - { - assert( !param.optional ); - if ( 
param.type.type == "void" ) - { - // use the original name here, as void-pointer are not mapped to some reference - arguments += param.name; - } - else - { - arguments += "&" + name; - } - } - } - else if ( param.len == "null-terminated" ) - { - assert( ( param.type.type == "char" ) && param.arraySizes.empty() ); - if ( param.optional ) - { - arguments += name + " ? " + name + "->c_str() : nullptr"; - } - else - { - arguments += name + ".c_str()"; - } - } - else - { - if ( ( singularParamIndex != INVALID_INDEX ) && ( params[singularParamIndex].len == param.len ) ) - { - name = "&" + stripPluralS( name ); - } - else - { - name += ".data()"; - } - if ( beginsWith( param.type.type, "Vk" ) || ( param.type.type == "void" ) ) - { - arguments += "reinterpret_cast<" + param.type.prefix + " " + param.type.type + " " + param.type.postfix + - ">( " + name + " )"; - } - else - { - arguments += name; - } - } - } - else if ( param.type.isNonConstPointer() && - ( specialPointerTypes.find( param.type.type ) == specialPointerTypes.end() ) ) - { - assert( beginsWith( param.name, "p" ) ); - std::string name = startLowerCase( stripPrefix( param.name, "p" ) ); - if ( param.len.empty() ) - { - assert( param.arraySizes.empty() ); - if ( beginsWith( param.type.type, "Vk" ) ) - { - // we can ignore param.optional here, as this is a local variable! 
- arguments += "reinterpret_cast<" + param.type.type + " *>( &" + name + " )"; - } - else - { - assert( !param.optional ); - arguments += "&" + name; - } - } - else - { - assert( param.arraySizes.empty() ); - if ( nonConstPointerAsNullptr ) - { - arguments += "nullptr"; - } - else if ( beginsWith( param.type.type, "Vk" ) || ( param.type.type == "void" ) ) - { - if ( ( singularParamIndex != INVALID_INDEX ) && ( params[singularParamIndex].name == param.name ) ) - { - name = "&" + stripPluralS( name ); - } - else - { - name += ".data()"; - } - arguments += "reinterpret_cast<" + param.type.type + " *>( " + name + " )"; - } - else - { - arguments += name + ".data()"; - } - } - } - else - { - assert( param.len.empty() ); - if ( beginsWith( param.type.type, "Vk" ) ) - { - if ( param.arraySizes.empty() ) - { - auto pointerIt = std::find_if( - params.begin(), params.end(), [¶m]( ParamData const & pd ) { return pd.len == param.name; } ); - if ( pointerIt != params.end() ) - { - arguments += startLowerCase( stripPrefix( pointerIt->name, "p" ) ) + ".size()"; - if ( pointerIt->type.type == "void" ) - { - arguments += " * sizeof( T )"; - } - } - else - { - arguments += "static_cast<" + param.type.type + ">( " + param.name + " )"; - } - } - else - { - assert( param.arraySizes.size() == 1 ); - assert( param.type.prefix == "const" ); - arguments += "reinterpret_cast( " + param.name + " )"; - } - } - else - { - assert( param.arraySizes.empty() ); - if ( ( singularParamIndex != INVALID_INDEX ) && ( params[singularParamIndex].len == param.name ) ) - { - assert( ( param.type.type == "size_t" ) || ( param.type.type == "uint32_t" ) ); - if ( params[singularParamIndex].type.type == "void" ) - { - arguments += "sizeof( T )"; - } - else - { - arguments += "1"; - } - } - else - { - auto pointerIt = std::find_if( - params.begin(), params.end(), [¶m]( ParamData const & pd ) { return pd.len == param.name; } ); - if ( pointerIt != params.end() ) - { - arguments += startLowerCase( stripPrefix( 
pointerIt->name, "p" ) ) + ".size()"; - if ( pointerIt->type.type == "void" ) - { - arguments += " * sizeof( T )"; - } - } - else - { - arguments += param.name; - } - } - } + arguments += ", "; } + arguments += constructCallArgumentEnhanced( + params[i], params, nonConstPointerAsNullptr, singularParamIndex, raiiHandleMemberFunction ); encounteredArgument = true; } return arguments; @@ -3575,17 +4036,18 @@ std::string VulkanHppGenerator::constructCallArgumentsStandard( std::string cons std::string VulkanHppGenerator::constructCommandResult( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices ) const { assert( commandData.returnType == "VkResult" ); std::set skippedParameters = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, {}, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, {}, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParameters, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParameters, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = ( 1 < commandData.successCodes.size() ) ? 
"Result" : "typename ResultValueType::type"; @@ -3602,8 +4064,10 @@ std::string VulkanHppGenerator::constructCommandResult( std::string const & return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "callArguments", constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, false ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "callArguments", + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "nodiscard", nodiscard }, @@ -3627,6 +4091,7 @@ std::string VulkanHppGenerator::constructCommandResult( std::string const & std::string VulkanHppGenerator::constructCommandResultEnumerate( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndices, bool withAllocator ) const @@ -3635,15 +4100,16 @@ std::string VulkanHppGenerator::constructCommandResultEnumerate( std::string con assert( ( commandData.successCodes.size() == 2 ) && ( commandData.successCodes[0] == "VK_SUCCESS" ) && ( commandData.successCodes[1] == "VK_INCOMPLETE" ) ); - std::set skippedParams = determineSkippedParams( commandData.handle, - commandData.params, + std::set skippedParams = determineSkippedParams( commandData.params, + initialSkipCount, { vectorParamIndices }, { vectorParamIndices.second, vectorParamIndices.first }, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParams, 
INVALID_INDEX, definition, withAllocator, false, true ); + std::string commandName = + determineCommandName( name, initialSkipCount ? commandData.params[initialSkipCount - 1].type.type : "", m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string vectorElementType = ( commandData.params[vectorParamIndices.first].type.type == "void" ) ? "uint8_t" @@ -3685,17 +4151,18 @@ std::string VulkanHppGenerator::constructCommandResultEnumerate( std::string con functionTemplate, { { "allocatorType", allocatorType }, { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "const", commandData.handle.empty() ? "" : " const" }, { "counterName", startLowerCase( stripPrefix( commandData.params[vectorParamIndices.second].name, "p" ) ) }, { "counterType", commandData.params[vectorParamIndices.second].type.type }, { "firstCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, true, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, true, INVALID_INDEX, false ) }, { "nodiscard", nodiscard }, { "secondCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "typenameCheck", typenameCheck }, { "vectorAllocator", withAllocator ? 
( "( " + startLowerCase( allocatorType ) + " )" ) : "" }, { "vectorElementType", vectorElementType }, @@ -3727,6 +4194,7 @@ std::string VulkanHppGenerator::constructCommandResultEnumerate( std::string con std::string VulkanHppGenerator::constructCommandResultEnumerateChained( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices, @@ -3737,11 +4205,11 @@ std::string ( commandData.successCodes[1] == "VK_INCOMPLETE" ) ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, { vectorParamIndex }, returnParamIndices, false ); + determineSkippedParams( commandData.params, initialSkipCount, { vectorParamIndex }, returnParamIndices, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, true ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, true, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); assert( beginsWith( commandData.params[vectorParamIndex.first].type.type, "Vk" ) ); std::string vectorElementType = @@ -3795,16 +4263,17 @@ std::string return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, { "commandName", commandName }, { "counterName", startLowerCase( stripPrefix( commandData.params[vectorParamIndex.second].name, "p" ) ) }, { "counterType", commandData.params[vectorParamIndex.second].type.type }, { "firstCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, true, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, true, INVALID_INDEX, false ) }, { "nodiscard", nodiscard }, { "secondCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "structureChainAllocator", withAllocator ? ( "( structureChainAllocator )" ) : "" }, { "typenameCheck", typenameCheck }, { "vectorElementType", vectorElementType }, @@ -3833,6 +4302,7 @@ std::string std::string VulkanHppGenerator::constructCommandResultEnumerateTwoVectors( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & returnParamIndices, @@ -3849,11 +4319,11 @@ std::string assert( firstVectorParamIt->second == secondVectorParamIt->second ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, returnParamIndices, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, returnParamIndices, false ); std::string argumentList = constructArgumentListEnhanced( - commandData.params, skippedParams, INVALID_INDEX, definition, withAllocators, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + commandData.params, skippedParams, INVALID_INDEX, definition, withAllocators, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = 
determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string templateTypeFirst = stripPrefix( commandData.params[firstVectorParamIt->first].type.type, "Vk" ); std::string templateTypeSecond = stripPrefix( commandData.params[secondVectorParamIt->first].type.type, "Vk" ); @@ -3905,19 +4375,20 @@ std::string return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "counterName", startLowerCase( stripPrefix( stripPluralS( commandData.params[firstVectorParamIt->second].name ), "p" ) ) }, { "counterType", commandData.params[firstVectorParamIt->second].type.type }, { "firstCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, true, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, true, INVALID_INDEX, false ) }, { "firstVectorName", startLowerCase( stripPrefix( commandData.params[firstVectorParamIt->first].name, "p" ) ) }, { "nodiscard", nodiscard }, { "pairConstructor", pairConstructor }, { "secondCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "secondVectorName", startLowerCase( stripPrefix( commandData.params[secondVectorParamIt->first].name, "p" ) ) }, { "templateTypeFirst", templateTypeFirst }, @@ -3951,6 +4422,7 @@ std::string std::string VulkanHppGenerator::constructCommandResultEnumerateTwoVectorsDeprecated( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, bool 
withAllocators ) const @@ -3959,7 +4431,7 @@ std::string VulkanHppGenerator::constructCommandResultEnumerateTwoVectorsDepreca std::string argumentList = constructFunctionHeaderArgumentsEnhanced( commandData, returnParamIndex, returnParamIndex, vectorParamIndices, !definition, withAllocators ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = determineEnhancedReturnType( commandData, returnParamIndex, false ); std::string templateType = stripPrefix( commandData.params[returnParamIndex].type.type, "Vk" ); @@ -3979,24 +4451,27 @@ std::string VulkanHppGenerator::constructCommandResultEnumerateTwoVectorsDepreca templateType + ">::value, int>::type" : ""; - return replaceWithMap( functionTemplate, - { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, - { "classSeparator", commandData.handle.empty() ? "" : "::" }, - { "commandName", commandName }, - { "functionBody", - constructFunctionBodyEnhanced( " ", - name, - commandData, - returnParamIndex, - returnParamIndex, - vectorParamIndices, - true, - returnType, - withAllocators ) }, - { "nodiscard", nodiscard }, - { "returnType", returnType }, - { "typeCheck", typeCheck } } ); + return replaceWithMap( + functionTemplate, + { { "argumentList", argumentList }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, + { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, + { "commandName", commandName }, + { "functionBody", + constructFunctionBodyEnhanced( " ", + name, + commandData, + initialSkipCount, + returnParamIndex, + returnParamIndex, + vectorParamIndices, + true, + returnType, + withAllocators ) }, + { "nodiscard", nodiscard }, + { "returnType", returnType }, + { "typeCheck", typeCheck } } ); } else { @@ -4021,17 +4496,18 @@ std::string VulkanHppGenerator::constructCommandResultEnumerateTwoVectorsDepreca std::string VulkanHppGenerator::constructCommandResultGetChain( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const { assert( !commandData.handle.empty() && ( commandData.returnType == "VkResult" ) && !commandData.errorCodes.empty() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, {}, { nonConstPointerIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, {}, { nonConstPointerIndex }, false ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); assert( beginsWith( commandData.params[nonConstPointerIndex].type.type, "Vk" ) ); std::string returnType = @@ -4053,8 +4529,9 @@ std::string VulkanHppGenerator::constructCommandResultGetChain( std::string cons functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, - { 
"className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "returnVariable", startLowerCase( stripPrefix( commandData.params[nonConstPointerIndex].name, "p" ) ) }, @@ -4074,17 +4551,19 @@ std::string VulkanHppGenerator::constructCommandResultGetChain( std::string cons std::string VulkanHppGenerator::constructCommandResultGetHandleUnique( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const { assert( ( commandData.returnType == "VkResult" ) && ( commandData.successCodes.size() == 1 ) ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, {}, { nonConstPointerIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, {}, { nonConstPointerIndex }, false ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = + determineCommandName( name, initialSkipCount ? 
commandData.params[initialSkipCount - 1].type.type : "", m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnBaseType = commandData.params[nonConstPointerIndex].type.compose(); assert( endsWith( returnBaseType, "*" ) ); @@ -4128,7 +4607,8 @@ std::string VulkanHppGenerator::constructCommandResultGetHandleUnique( std::stri objectDeleter = "ObjectDestroy"; allocator = "allocator, "; } - std::string className = commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ); + std::string className = + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : ""; std::string parentName = ( className.empty() || ( commandData.params[nonConstPointerIndex].type.type == "VkDevice" ) ) ? "NoParent" : className; @@ -4138,7 +4618,7 @@ std::string VulkanHppGenerator::constructCommandResultGetHandleUnique( std::stri { { "allocator", allocator }, { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "className", className }, { "classSeparator", className.empty() ? 
"" : "::" }, { "commandName", commandName }, @@ -4169,6 +4649,7 @@ std::string VulkanHppGenerator::constructCommandResultGetHandleUnique( std::stri std::string VulkanHppGenerator::constructCommandResultGetTwoValues( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::vector const & nonConstPointerParamIndices ) const { @@ -4176,11 +4657,11 @@ std::string VulkanHppGenerator::constructCommandResultGetTwoValues( ( nonConstPointerParamIndices.size() == 2 ) && !commandData.handle.empty() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, {}, nonConstPointerParamIndices, false ); + determineSkippedParams( commandData.params, initialSkipCount, {}, nonConstPointerParamIndices, false ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string firstType = commandData.params[nonConstPointerParamIndices[0]].type.compose(); assert( endsWith( firstType, "*" ) ); firstType.pop_back(); @@ -4207,8 +4688,9 @@ std::string VulkanHppGenerator::constructCommandResultGetTwoValues( functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? 
stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "firstType", firstType }, @@ -4236,6 +4718,7 @@ std::string VulkanHppGenerator::constructCommandResultGetTwoValues( std::string VulkanHppGenerator::constructCommandResultGetTwoVectors( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices ) const { @@ -4250,11 +4733,11 @@ std::string #endif std::set skippedParameters = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, {}, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, {}, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParameters, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParameters, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); std::string noexceptString = vectorSizeCheck.first ? "VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS" : "VULKAN_HPP_NOEXCEPT"; @@ -4271,15 +4754,17 @@ std::string return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "callArguments", constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, false ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "callArguments", + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? 
stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "noexcept", noexceptString }, { "successCodeList", constructSuccessCodeList( commandData.successCodes ) }, { "vectorSizeCheck", vectorSizeCheck.first - ? constructVectorSizeCheck( name, commandData, vectorSizeCheck.second, skippedParameters ) + ? constructVectorSizeCheck( name, commandData, initialSkipCount, vectorSizeCheck.second, skippedParameters ) : "" }, { "vkCommand", name } } ); } @@ -4300,17 +4785,19 @@ std::string std::string VulkanHppGenerator::constructCommandResultGetValue( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const { assert( commandData.returnType == "VkResult" ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, {}, { nonConstPointerIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, {}, { nonConstPointerIndex }, false ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = + determineCommandName( name, initialSkipCount ? 
commandData.params[initialSkipCount - 1].type.type : "", m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnBaseType = commandData.params[nonConstPointerIndex].type.compose(); assert( endsWith( returnBaseType, "*" ) ); @@ -4332,8 +4819,9 @@ std::string VulkanHppGenerator::constructCommandResultGetValue( std::string cons functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "const", commandData.handle.empty() ? 
"" : " const" }, { "commandName", commandName }, @@ -4362,6 +4850,7 @@ std::string VulkanHppGenerator::constructCommandResultGetValue( std::string cons std::string VulkanHppGenerator::constructCommandResultGetValueDeprecated( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -4371,7 +4860,7 @@ std::string std::string argumentList = constructFunctionHeaderArgumentsEnhanced( commandData, returnParamIndex, INVALID_INDEX, vectorParamIndices, !definition, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); assert( !beginsWith( commandData.params[returnParamIndex].type.type, "Vk" ) ); @@ -4390,12 +4879,21 @@ std::string return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "::" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, { "commandName", commandName }, { "functionBody", - constructFunctionBodyEnhanced( - " ", name, commandData, returnParamIndex, INVALID_INDEX, vectorParamIndices, false, returnType, false ) }, + constructFunctionBodyEnhanced( " ", + name, + commandData, + initialSkipCount, + returnParamIndex, + INVALID_INDEX, + vectorParamIndices, + false, + returnType, + false ) }, { "nodiscard", nodiscard }, { "returnType", returnType } } ); } @@ -4415,6 +4913,7 @@ std::string std::string VulkanHppGenerator::constructCommandResultGetVector( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -4422,11 +4921,11 @@ std::string VulkanHppGenerator::constructCommandResultGetVector( std::string con assert( commandData.returnType == "VkResult" ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, { returnParamIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, { returnParamIndex }, false ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = constructReturnType( commandData, "std::vector" ); @@ -4446,8 +4945,9 @@ std::string VulkanHppGenerator::constructCommandResultGetVector( std::string con functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( 
commandData.handle, commandData.params, false, INVALID_INDEX ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "dataName", startLowerCase( stripPrefix( commandData.params[returnParamIndex].name, "p" ) ) }, @@ -4474,6 +4974,7 @@ std::string VulkanHppGenerator::constructCommandResultGetVector( std::string con std::string VulkanHppGenerator::constructCommandResultGetVectorAndValue( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & returnParamIndices, @@ -4487,11 +4988,11 @@ std::string assert( commandData.returnType == "VkResult" ); std::set skippedParameters = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, returnParamIndices, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, returnParamIndices, false ); std::string argumentList = constructArgumentListEnhanced( - commandData.params, skippedParameters, INVALID_INDEX, definition, withAllocator, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + commandData.params, skippedParameters, INVALID_INDEX, definition, withAllocator, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = constructReturnType( commandData, "std::vector" ); @@ -4525,8 +5026,9 @@ std::string { "allocatorType", allocatorType }, { 
"argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "nodiscard", nodiscard }, @@ -4566,6 +5068,7 @@ std::string std::string VulkanHppGenerator::constructCommandResultGetVectorDeprecated( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -4574,7 +5077,7 @@ std::string std::string argumentList = constructFunctionHeaderArgumentsEnhanced( commandData, INVALID_INDEX, returnParamIndex, vectorParamIndices, !definition, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = constructReturnType( commandData, "void" ); @@ -4591,12 +5094,21 @@ std::string return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, { "commandName", commandName }, { "functionBody", - constructFunctionBodyEnhanced( - " ", name, commandData, INVALID_INDEX, returnParamIndex, vectorParamIndices, false, "void", false ) }, + constructFunctionBodyEnhanced( " ", + name, + commandData, + initialSkipCount, + INVALID_INDEX, + returnParamIndex, + vectorParamIndices, + false, + "void", + false ) }, { "nodiscard", nodiscard }, { "returnType", returnType } } ); } @@ -4617,6 +5129,7 @@ std::string std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandles( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex, @@ -4625,11 +5138,11 @@ std::string assert( commandData.returnType == "VkResult" ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, { returnParamIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, { returnParamIndex }, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string handleType = stripPrefix( commandData.params[returnParamIndex].type.type, "Vk" ); std::string returnType = @@ -4658,8 +5171,9 @@ std::string functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, 
INVALID_INDEX ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "nodiscard", nodiscard }, @@ -4697,6 +5211,7 @@ std::string std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesSingular( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -4706,14 +5221,15 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesSingular assert( commandData.params[vectorParamIndices.begin()->second].type.isValue() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, { returnParamIndex }, true ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, { returnParamIndex }, true ); size_t singularParam = ( returnParamIndex == vectorParamIndices.begin()->first ) ? 
std::next( vectorParamIndices.begin() )->first : vectorParamIndices.begin()->first; std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, singularParam, definition, false, false ); - std::string commandName = stripPluralS( determineCommandName( name, commandData.params[0].type.type ) ); + constructArgumentListEnhanced( commandData.params, skippedParams, singularParam, definition, false, false, true ); + std::string commandName = + stripPluralS( determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ) ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string handleType = stripPrefix( commandData.params[returnParamIndex].type.type, "Vk" ); std::string returnType = ( commandData.successCodes.size() == 1 ) @@ -4735,8 +5251,9 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesSingular functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, returnParamIndex ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, returnParamIndex, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, { "commandName", commandName }, { "nodiscard", nodiscard }, @@ -4764,6 +5281,7 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesSingular std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUnique( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex, @@ -4772,11 +5290,11 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUnique( assert( commandData.returnType == "VkResult" ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, { returnParamIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, { returnParamIndex }, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParams, INVALID_INDEX, definition, withAllocator, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string handleType = stripPrefix( commandData.params[returnParamIndex].type.type, "Vk" ); std::string returnType = @@ -4806,7 +5324,8 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUnique( return createResultValue( result, std::move( ${uniqueVectorName} ), VULKAN_HPP_NAMESPACE_STRING "::${className}${classSeparator}${commandName}Unique"${successCodeList} ); })"; - std::string className = commandData.handle.empty() ? 
"" : stripPrefix( commandData.handle, "Vk" ); + std::string className = + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : ""; std::string deleterDefinition; std::vector lenParts = tokenize( commandData.params[returnParamIndex].len, "->" ); @@ -4839,7 +5358,7 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUnique( functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "className", className }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, @@ -4881,6 +5400,7 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUnique( std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUniqueSingular( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -4890,14 +5410,15 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUniqueSi assert( commandData.params[vectorParamIndices.begin()->second].type.isValue() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, { returnParamIndex }, true ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, { returnParamIndex }, true ); size_t singularParam = ( returnParamIndex == vectorParamIndices.begin()->first ) ? 
std::next( vectorParamIndices.begin() )->first : vectorParamIndices.begin()->first; std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, singularParam, definition, false, false ); - std::string commandName = stripPluralS( determineCommandName( name, commandData.params[0].type.type ) ); + constructArgumentListEnhanced( commandData.params, skippedParams, singularParam, definition, false, false, true ); + std::string commandName = + stripPluralS( determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ) ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string handleType = stripPrefix( commandData.params[returnParamIndex].type.type, "Vk" ); std::string returnType = ( commandData.successCodes.size() == 1 ) @@ -4920,8 +5441,9 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUniqueSi functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, returnParamIndex ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, returnParamIndex, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, { "commandName", commandName }, { "handleName", @@ -4951,6 +5473,7 @@ std::string VulkanHppGenerator::constructCommandResultGetVectorOfHandlesUniqueSi std::string VulkanHppGenerator::constructCommandResultGetVectorSingular( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -4958,11 +5481,12 @@ std::string assert( commandData.returnType == "VkResult" ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, { returnParamIndex }, true ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, { returnParamIndex }, true ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = stripPluralS( determineCommandName( name, commandData.params[0].type.type ) ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = + stripPluralS( determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ) ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = constructReturnType( commandData, "T" ); @@ -4981,8 +5505,9 @@ std::string functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, returnParamIndex ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, returnParamIndex, false ) }, + { "className", + initialSkipCount ? 
stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "dataName", startLowerCase( stripPrefix( commandData.params[returnParamIndex].name, "p" ) ) }, @@ -5007,14 +5532,16 @@ std::string std::string VulkanHppGenerator::constructCommandStandard( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const { - std::set skippedParams = determineSkippedParams( commandData.handle, commandData.params, {}, {}, false ); + std::set skippedParams = determineSkippedParams( commandData.params, initialSkipCount, {}, {}, false ); std::string argumentList = constructArgumentListStandard( commandData.params, skippedParams ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); - std::string nodiscard = constructNoDiscardStandard( commandData ); - std::string returnType = stripPrefix( commandData.returnType, "Vk" ); + std::string commandName = + determineCommandName( name, initialSkipCount ? commandData.params[initialSkipCount - 1].type.type : "", m_tags ); + std::string nodiscard = constructNoDiscardStandard( commandData ); + std::string returnType = stripPrefix( commandData.returnType, "Vk" ); if ( definition ) { @@ -5036,15 +5563,17 @@ std::string VulkanHppGenerator::constructCommandStandard( std::string const & na ${functionBody}; })"; - return replaceWithMap( functionTemplate, - { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, - { "classSeparator", commandData.handle.empty() ? "" : "::" }, - { "commandName", commandName }, - { "const", commandData.handle.empty() ? 
"" : " const" }, - { "functionBody", functionBody }, - { "nodiscard", nodiscard }, - { "returnType", returnType } } ); + return replaceWithMap( + functionTemplate, + { { "argumentList", argumentList }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, + { "classSeparator", commandData.handle.empty() ? "" : "::" }, + { "commandName", commandName }, + { "const", commandData.handle.empty() ? "" : " const" }, + { "functionBody", functionBody }, + { "nodiscard", nodiscard }, + { "returnType", returnType } } ); } else { @@ -5063,16 +5592,17 @@ std::string VulkanHppGenerator::constructCommandStandard( std::string const & na std::string VulkanHppGenerator::constructCommandType( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const { assert( ( commandData.returnType != "VkResult" ) && ( commandData.returnType != "void" ) && commandData.successCodes.empty() && commandData.errorCodes.empty() ); - std::set skippedParameters = determineSkippedParams( commandData.handle, commandData.params, {}, {}, false ); + std::set skippedParameters = determineSkippedParams( commandData.params, initialSkipCount, {}, {}, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParameters, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParameters, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); std::string returnType = stripPrefix( commandData.returnType, "Vk" ); @@ -5088,8 +5618,10 @@ std::string 
VulkanHppGenerator::constructCommandType( std::string const & name, return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "callArguments", constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, false ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "callArguments", + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "nodiscard", nodiscard }, @@ -5112,17 +5644,18 @@ std::string VulkanHppGenerator::constructCommandType( std::string const & name, std::string VulkanHppGenerator::constructCommandVoid( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices ) const { assert( ( commandData.returnType == "void" ) && commandData.successCodes.empty() && commandData.errorCodes.empty() ); std::set skippedParameters = - determineSkippedParams( commandData.handle, commandData.params, vectorParamIndices, {}, false ); + determineSkippedParams( commandData.params, initialSkipCount, vectorParamIndices, {}, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParameters, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParameters, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string typenameT = ( ( vectorParamIndices.size() == 1 ) && ( commandData.params[vectorParamIndices.begin()->first].type.type 
== "void" ) ) ? "typename T, " @@ -5142,15 +5675,17 @@ std::string VulkanHppGenerator::constructCommandVoid( std::string const & return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "callArguments", constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, false ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "callArguments", + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "noexcept", noexceptString }, { "typenameT", typenameT }, { "vectorSizeCheck", vectorSizeCheck.first - ? constructVectorSizeCheck( name, commandData, vectorSizeCheck.second, skippedParameters ) + ? constructVectorSizeCheck( name, commandData, initialSkipCount, vectorSizeCheck.second, skippedParameters ) : "" }, { "vkCommand", name } } ); } @@ -5170,6 +5705,7 @@ std::string VulkanHppGenerator::constructCommandVoid( std::string const & std::string VulkanHppGenerator::constructCommandVoidEnumerate( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices, @@ -5179,11 +5715,11 @@ std::string VulkanHppGenerator::constructCommandVoidEnumerate( std::string const commandData.successCodes.empty() && commandData.errorCodes.empty() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, { vectorParamIndex }, returnParamIndices, false ); + determineSkippedParams( commandData.params, initialSkipCount, { vectorParamIndex }, returnParamIndices, false ); std::string argumentList = constructArgumentListEnhanced( - commandData.params, skippedParams, INVALID_INDEX, definition, 
withAllocators, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + commandData.params, skippedParams, INVALID_INDEX, definition, withAllocators, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string vectorElementType = stripPrefix( commandData.params[vectorParamIndex.first].type.type, "Vk" ); if ( definition ) @@ -5210,15 +5746,16 @@ std::string VulkanHppGenerator::constructCommandVoidEnumerate( std::string const return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "counterName", startLowerCase( stripPrefix( commandData.params[vectorParamIndex.second].name, "p" ) ) }, { "counterType", commandData.params[vectorParamIndex.second].type.type }, { "firstCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, true, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, true, INVALID_INDEX, false ) }, { "secondCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "typenameCheck", typenameCheck }, { "vectorAllocator", withAllocators @@ -5252,6 +5789,7 @@ std::string VulkanHppGenerator::constructCommandVoidEnumerate( std::string const std::string VulkanHppGenerator::constructCommandVoidEnumerateChained( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & 
returnParamIndices, @@ -5261,11 +5799,11 @@ std::string commandData.successCodes.empty() && commandData.errorCodes.empty() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, { vectorParamIndex }, returnParamIndices, false ); + determineSkippedParams( commandData.params, initialSkipCount, { vectorParamIndex }, returnParamIndices, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, withAllocators, true ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + commandData.params, skippedParams, INVALID_INDEX, definition, withAllocators, true, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); assert( beginsWith( commandData.params[vectorParamIndex.first].type.type, "Vk" ) ); std::string vectorElementType = "VULKAN_HPP_NAMESPACE::" + stripPrefix( commandData.params[vectorParamIndex.first].type.type, "Vk" ); @@ -5303,15 +5841,16 @@ std::string return replaceWithMap( functionTemplate, { { "argumentList", argumentList }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? 
"" : "::" }, { "commandName", commandName }, { "counterName", startLowerCase( stripPrefix( commandData.params[vectorParamIndex.second].name, "p" ) ) }, { "counterType", commandData.params[vectorParamIndex.second].type.type }, { "firstCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, true, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, true, INVALID_INDEX, false ) }, { "secondCallArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "structureChainAllocator", withAllocators ? ( ", structureChainAllocator" ) : "" }, { "typenameCheck", typenameCheck }, { "vectorElementType", vectorElementType }, @@ -5337,17 +5876,18 @@ std::string std::string VulkanHppGenerator::constructCommandVoidGetChain( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const { assert( ( commandData.returnType == "void" ) && commandData.successCodes.empty() && commandData.errorCodes.empty() ); std::set skippedParams = - determineSkippedParams( commandData.handle, commandData.params, {}, { nonConstPointerIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, {}, { nonConstPointerIndex }, false ); std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + constructArgumentListEnhanced( commandData.params, skippedParams, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < 
commandData.errorCodes.size() ); assert( beginsWith( commandData.params[nonConstPointerIndex].type.type, "Vk" ) ); std::string returnType = @@ -5369,8 +5909,9 @@ std::string VulkanHppGenerator::constructCommandVoidGetChain( std::string const functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, - { "className", commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, + { "className", + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : "" }, { "classSeparator", commandData.handle.empty() ? "" : "::" }, { "commandName", commandName }, { "returnVariable", startLowerCase( stripPrefix( commandData.params[nonConstPointerIndex].name, "p" ) ) }, @@ -5389,6 +5930,7 @@ std::string VulkanHppGenerator::constructCommandVoidGetChain( std::string const std::string VulkanHppGenerator::constructCommandVoidGetValue( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const @@ -5399,17 +5941,13 @@ std::string VulkanHppGenerator::constructCommandVoidGetValue( std::string const assert( vectorParamIndices.empty() || ( vectorParamIndices.begin()->second != INVALID_INDEX ) ); std::set skippedParameters = - determineSkippedParams( commandData.handle, commandData.params, {}, { returnParamIndex }, false ); + determineSkippedParams( commandData.params, initialSkipCount, {}, { returnParamIndex }, false ); - std::string argumentList = - constructArgumentListEnhanced( commandData.params, skippedParameters, INVALID_INDEX, definition, false, false ); - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string argumentList = constructArgumentListEnhanced( + 
commandData.params, skippedParameters, INVALID_INDEX, definition, false, false, true ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string nodiscard = determineNoDiscard( 1 < commandData.successCodes.size(), 1 < commandData.errorCodes.size() ); - std::string returnType = commandData.params[returnParamIndex].type.type; - if ( beginsWith( returnType, "Vk" ) ) - { - returnType = "VULKAN_HPP_NAMESPACE::" + stripPrefix( returnType, "Vk" ); - } + std::string returnType = stripPostfix( commandData.params[returnParamIndex].type.compose(), "*" ); bool needsVectorSizeCheck = !vectorParamIndices.empty() && isLenByStructMember( commandData.params[vectorParamIndices.begin()->first].len, @@ -5418,7 +5956,8 @@ std::string VulkanHppGenerator::constructCommandVoidGetValue( std::string const if ( definition ) { - std::string className = commandData.handle.empty() ? "" : stripPrefix( commandData.handle, "Vk" ); + std::string className = + initialSkipCount ? stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) : ""; std::string classSeparator = commandData.handle.empty() ? 
"" : "::"; std::string vectorSizeCheck; @@ -5460,7 +5999,7 @@ std::string VulkanHppGenerator::constructCommandVoidGetValue( std::string const functionTemplate, { { "argumentList", argumentList }, { "callArguments", - constructCallArgumentsEnhanced( commandData.handle, commandData.params, false, INVALID_INDEX ) }, + constructCallArgumentsEnhanced( commandData.params, initialSkipCount, false, INVALID_INDEX, false ) }, { "className", className }, { "classSeparator", classSeparator }, { "commandName", commandName }, @@ -5495,9 +6034,26 @@ std::string VulkanHppGenerator::constructConstexprString( std::pair const & successCodes ) const +{ + assert( !successCodes.empty() ); + std::string failureCheck = "result != VULKAN_HPP_NAMESPACE::Result::" + createSuccessCode( successCodes[0], m_tags ); + if ( 1 < successCodes.size() ) + { + failureCheck = "( " + failureCheck + " )"; + for ( size_t i = 1; i < successCodes.size(); ++i ) + { + failureCheck += + "&& ( result != VULKAN_HPP_NAMESPACE::Result::" + createSuccessCode( successCodes[i], m_tags ) + " )"; + } + } + return failureCheck; +} + std::string VulkanHppGenerator::constructFunctionBodyEnhanced( std::string const & indentation, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t returnParamIndex, size_t templateParamIndex, std::map const & vectorParamIndices, @@ -5509,7 +6065,7 @@ std::string VulkanHppGenerator::constructFunctionBodyEnhanced( std::string const if ( 1 < vectorParamIndices.size() ) { appendFunctionBodyEnhancedMultiVectorSizeCheck( - str, indentation, name, commandData, returnParamIndex, vectorParamIndices ); + str, indentation, name, commandData, initialSkipCount, returnParamIndex, vectorParamIndices ); } std::string returnName; @@ -5533,7 +6089,7 @@ std::string VulkanHppGenerator::constructFunctionBodyEnhanced( std::string const if ( ( commandData.returnType == "VkResult" ) || !commandData.successCodes.empty() ) { appendFunctionBodyEnhancedReturnResultValue( - str, 
indentation, returnName, name, commandData, returnParamIndex, twoStep ); + str, indentation, returnName, name, commandData, initialSkipCount, returnParamIndex, twoStep ); } return str; } @@ -5611,6 +6167,2831 @@ std::string VulkanHppGenerator::constructNoDiscardStandard( CommandData const & return ( 1 < commandData.successCodes.size() + commandData.errorCodes.size() ) ? "VULKAN_HPP_NODISCARD " : ""; } +std::pair VulkanHppGenerator::constructRAIIHandleConstructor( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::string const & enter, + std::string const & leave ) const +{ + auto handleParamIt = std::find_if( constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&handle]( ParamData const & pd ) { return pd.type.type == handle.first; } ); + assert( handleParamIt != constructorIt->second.params.end() && handleParamIt->type.isNonConstPointer() ); + + std::string singularConstructor, arrayConstructor; + bool constructedConstructor = false; + if ( handleParamIt->len.empty() ) + { + if ( constructorIt->second.returnType == "void" ) + { + singularConstructor = constructRAIIHandleConstructorVoid( handle, constructorIt, enter, leave ); + constructedConstructor = true; + } + else if ( ( constructorIt->second.returnType == "VkResult" ) && + ( constructorIt->second.successCodes.size() == 1 ) && ( !constructorIt->second.errorCodes.empty() ) ) + { + singularConstructor = constructRAIIHandleConstructorResult( handle, constructorIt, enter, leave ); + constructedConstructor = true; + } + } + else + { + auto lenParamIt = + std::find_if( constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&handleParamIt]( ParamData const & pd ) { return pd.name == handleParamIt->len; } ); + if ( ( lenParamIt != constructorIt->second.params.end() ) && lenParamIt->type.isNonConstPointer() && + ( constructorIt->second.successCodes.size() == 2 ) && + ( constructorIt->second.successCodes[0] == "VK_SUCCESS" ) && + ( 
constructorIt->second.successCodes[1] == "VK_INCOMPLETE" ) ) + { + arrayConstructor = + constructRAIIHandleConstructorEnumerate( handle, constructorIt, handleParamIt, lenParamIt, enter, leave ); + } + else + { + arrayConstructor = constructRAIIHandleConstructorVector( handle, constructorIt, handleParamIt, enter, leave ); + if ( ( lenParamIt != constructorIt->second.params.end() ) && + !checkEquivalentSingularConstructor( handle.second.constructorIts, constructorIt, lenParamIt ) ) + { + singularConstructor = + constructRAIIHandleConstructorVectorSingular( handle, constructorIt, handleParamIt, enter, leave ); + } + } + constructedConstructor = true; + } + if ( !constructedConstructor ) + { + throw std::runtime_error( "Never encountered a constructor function like " + constructorIt->first + " !" ); + } + return std::make_pair( singularConstructor, arrayConstructor ); +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorArguments( std::string const & handleType, + std::vector const & params, + bool singular, + bool encounteredArgument ) const +{ + std::string arguments; + for ( auto param : params ) + { + if ( param.type.type != handleType ) // filter out the constructed type + { + // the specialPointerTypes are considered const-pointers! + if ( param.type.isNonConstPointer() && + ( specialPointerTypes.find( param.type.type ) == specialPointerTypes.end() ) ) + { + // this is supposed to be the returned size on an enumeration function! 
+ assert( param.type.type == "uint32_t" ); + auto typeIt = std::find_if( + params.begin(), params.end(), [&handleType]( ParamData const & pd ) { return pd.type.type == handleType; } ); + assert( typeIt != params.end() ); + assert( typeIt->len == param.name ); + continue; + } + else if ( std::find_if( params.begin(), params.end(), [¶m]( ParamData const & pd ) { + return pd.len == param.name; + } ) != params.end() ) + { + // this is the len of an other parameter, which will be mapped to an ArrayProxy + assert( param.type.isValue() && ( param.type.type == "uint32_t" ) ); + assert( param.arraySizes.empty() && param.len.empty() && !param.optional ); + continue; + } + if ( encounteredArgument ) + { + arguments += ", "; + } + if ( param.type.isConstPointer() ) + { + assert( beginsWith( param.type.type, "Vk" ) ); + assert( beginsWith( param.name, "p" ) ); + std::string argumentName = startLowerCase( stripPrefix( param.name, "p" ) ); + std::string argumentType = "VULKAN_HPP_NAMESPACE::" + stripPrefix( param.type.type, "Vk" ); + if ( param.optional ) + { + assert( param.len.empty() ); + arguments += "VULKAN_HPP_NAMESPACE::Optional " + argumentName + " = nullptr"; + } + else if ( param.len.empty() ) + { + arguments += argumentType + " const & " + argumentName; + } + else + { + assert( std::find_if( params.begin(), params.end(), [¶m]( ParamData const & pd ) { + return pd.name == param.len; + } ) != params.end() ); + if ( singular ) + { + arguments += argumentType + " const & " + stripPluralS( argumentName ); + } + else + { + arguments += "VULKAN_HPP_NAMESPACE::ArrayProxy<" + argumentType + "> const & " + argumentName; + } + } + } + else if ( specialPointerTypes.find( param.type.type ) != specialPointerTypes.end() ) + { + assert( !param.optional ); + assert( param.type.isNonConstPointer() ); + arguments += param.type.type + " & " + param.name; + } + else if ( ( param.type.isValue() ) && isHandleType( param.type.type ) ) + { + std::string argument = + 
"VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::" + stripPrefix( param.type.type, "Vk" ); + if ( param.optional ) + { + argument = "VULKAN_HPP_NAMESPACE::Optional"; + } + arguments += argument + " const & " + param.name; + } + else + { + assert( !param.optional ); + arguments += param.type.compose() + " " + param.name; + } + encounteredArgument = true; + } + } + return arguments; +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorCallArguments( std::string const & handleType, + std::vector const & params, + bool nonConstPointerAsNullptr, + size_t singularParamIndex, + bool allocatorIsMemberVariable ) const +{ + std::string arguments; + bool encounteredArgument = false; + for ( auto param : params ) + { + if ( encounteredArgument ) + { + arguments += ", "; + } + if ( param.type.type == handleType ) + { + assert( param.type.isNonConstPointer() && param.arraySizes.empty() ); + if ( ( param.len.empty() ) || ( singularParamIndex != INVALID_INDEX ) ) + { + assert( !param.optional ); + assert( !param.optional ); + assert( ( singularParamIndex == INVALID_INDEX ) || ( param.name == params[singularParamIndex].name ) ); + arguments += + "reinterpret_cast<" + handleType + "*>( &m_" + startLowerCase( stripPrefix( handleType, "Vk" ) ) + " )"; + } + else if ( nonConstPointerAsNullptr ) + { + arguments += "nullptr"; + } + else + { + arguments += startLowerCase( stripPrefix( param.name, "p" ) ) + ".data()"; + } + } + else if ( param.type.type == "VkAllocationCallbacks" ) + { + assert( param.optional ); + if ( allocatorIsMemberVariable ) + { + // VkAllocationCallbacks is stored as a member of the handle class ! + arguments += "m_allocator"; + } + else + { + arguments += + "reinterpret_cast(static_cast( allocator ) )"; + } + } + else if ( m_handles.find( param.type.type ) != m_handles.end() ) + { + assert( param.type.isValue() && param.arraySizes.empty() && param.len.empty() ); + if ( param.optional ) + { + arguments += param.name + " ? 
static_cast<" + param.type.type + ">( **" + param.name + " ) : 0"; + } + else + { + arguments += "static_cast<" + param.type.type + " >( *" + param.name + " )"; + } + } + else + { + assert( !param.optional ); + arguments += constructCallArgumentEnhanced( param, params, nonConstPointerAsNullptr, singularParamIndex, true ); + } + encounteredArgument = true; + } + return arguments; +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorEnumerate( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::vector::const_iterator handleParamIt, + std::vector::const_iterator lenParamIt, + std::string const & enter, + std::string const & leave ) const +{ + std::string handleConstructorArguments = constructRAIIHandleSingularConstructorArguments( handle, constructorIt ); + std::string handleType = stripPrefix( handle.first, "Vk" ); + + const std::string constructorTemplate = + R"( +${enter} ${handleType}s( ${constructorArguments} ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = ${parentName}.getDispatcher(); + std::vector<${vectorElementType}> ${vectorName}; + ${counterType} ${counterName}; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( dispatcher->${constructorCall}( ${firstCallArguments} ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ${counterName} ) + { + ${vectorName}.resize( ${counterName} ); + result = static_cast( dispatcher->${constructorCall}( ${secondCallArguments} ) ); + VULKAN_HPP_ASSERT( ${counterName} <= ${vectorName}.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + this->reserve( ${counterName} ); + for ( auto const & ${handleName} : ${vectorName} ) + { + this->emplace_back( ${handleConstructorArguments}, dispatcher ); + } + } + else + { + throwResultException( result, "${constructorCall}" ); + } + } +${leave})"; + + return replaceWithMap( + constructorTemplate, + { { 
"constructorArguments", + constructRAIIHandleConstructorArguments( handle.first, constructorIt->second.params, false, false ) }, + { "constructorCall", constructorIt->first }, + { "counterName", startLowerCase( stripPrefix( lenParamIt->name, "p" ) ) }, + { "counterType", lenParamIt->type.type }, + { "enter", enter }, + { "firstCallArguments", + constructRAIIHandleConstructorCallArguments( + handle.first, constructorIt->second.params, true, INVALID_INDEX, true ) }, + { "handleConstructorArguments", handleConstructorArguments }, + { "handleName", startLowerCase( handleType ) }, + { "handleType", handleType }, + { "leave", leave }, + { "parentName", constructorIt->second.params.front().name }, + { "secondCallArguments", + constructRAIIHandleConstructorCallArguments( + handle.first, constructorIt->second.params, false, INVALID_INDEX, true ) }, + { "vectorElementType", handleParamIt->type.type }, + { "vectorName", startLowerCase( stripPrefix( handleParamIt->name, "p" ) ) } } ); +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorInitializationList( + std::string const & handleType, + std::map::const_iterator constructorIt, + std::map::const_iterator destructorIt, + bool hasSecondLevelCommands ) const +{ + std::string initializationList; + if ( destructorIt != m_commands.end() ) + { + for ( auto destructorParam : destructorIt->second.params ) + { + if ( destructorParam.type.type != handleType ) + { + if ( isHandleType( destructorParam.type.type ) ) + { + assert( destructorParam.type.isValue() && destructorParam.arraySizes.empty() && destructorParam.len.empty() && + !destructorParam.optional ); + initializationList += "m_" + startLowerCase( stripPrefix( destructorParam.type.type, "Vk" ) ) + "( "; + auto constructorParamIt = std::find_if( + constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&destructorParam]( ParamData const & pd ) { return pd.type.type == destructorParam.type.type; } ); + if ( constructorParamIt != 
constructorIt->second.params.end() ) + { + assert( constructorParamIt->type.isValue() && constructorParamIt->arraySizes.empty() && + constructorParamIt->len.empty() && !constructorParamIt->optional ); + initializationList += "*" + constructorParamIt->name; + } + else + { +#if !defined( NDEBUG ) + bool found = false; +#endif + for ( auto constructorParam : constructorIt->second.params ) + { + auto structureIt = m_structures.find( constructorParam.type.type ); + if ( structureIt != m_structures.end() ) + { + auto structureMemberIt = std::find_if( + structureIt->second.members.begin(), + structureIt->second.members.end(), + [&destructorParam]( MemberData const & md ) { return md.type.type == destructorParam.type.type; } ); + if ( structureMemberIt != structureIt->second.members.end() ) + { + assert( constructorParam.type.isConstPointer() && constructorParam.arraySizes.empty() && + constructorParam.len.empty() && !constructorParam.optional ); + initializationList += + startLowerCase( stripPrefix( constructorParam.name, "p" ) ) + "." + structureMemberIt->name; +#if !defined( NDEBUG ) + found = true; +#endif + break; + } + } + } + assert( found ); + } + initializationList += " ), "; + } + else if ( destructorParam.type.type == "VkAllocationCallbacks" ) + { + assert( destructorParam.type.isConstPointer() && destructorParam.arraySizes.empty() && + destructorParam.len.empty() && destructorParam.optional ); + initializationList += + "m_allocator( reinterpret_cast( static_cast( allocator ) ) ), "; + } + else + { + // we can ignore all other parameters here ! 
+ } + } + } + } + else if ( hasSecondLevelCommands ) + { + auto const & param = constructorIt->second.params.front(); + initializationList = "m_" + startLowerCase( stripPrefix( param.type.type, "Vk" ) ) + "( *" + param.name + " ), "; + } + return initializationList; +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorResult( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::string const & enter, + std::string const & leave ) const +{ + std::string callArguments = constructRAIIHandleConstructorCallArguments( + handle.first, constructorIt->second.params, false, INVALID_INDEX, handle.second.destructorIt != m_commands.end() ); + std::string constructorArguments; + if ( handle.first == "VkInstance" ) + { + constructorArguments = "VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Context const & context"; + } + constructorArguments += constructRAIIHandleConstructorArguments( + handle.first, constructorIt->second.params, false, handle.first == "VkInstance" ); + std::string dispatcherArgument = + ( ( handle.first == "VkInstance" ) ? 
"context" : constructorIt->second.params[0].name ) + ".getDispatcher()"; + std::string dispatcherInit; + if ( ( handle.first == "VkDevice" ) || ( handle.first == "VkInstance" ) ) + { + dispatcherArgument = "*" + dispatcherArgument; + dispatcherInit = "\n m_dispatcher.init( m_" + startLowerCase( stripPrefix( handle.first, "Vk" ) ) + " );"; + } + std::string initializationList = constructRAIIHandleConstructorInitializationList( + handle.first, constructorIt, handle.second.destructorIt, !handle.second.secondLevelCommands.empty() ); + + const std::string constructorTemplate = + R"( +${enter} ${handleType}( ${constructorArguments} ) + : ${initializationList}m_dispatcher( ${dispatcherArgument} ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${constructorCall}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, "${constructorCall}" ); + }${dispatcherInit} + } +${leave})"; + + return replaceWithMap( constructorTemplate, + { { "callArguments", callArguments }, + { "constructorArguments", constructorArguments }, + { "constructorCall", constructorIt->first }, + { "dispatcherArgument", dispatcherArgument }, + { "dispatcherInit", dispatcherInit }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( constructorIt->second.successCodes ) }, + { "leave", leave }, + { "handleType", stripPrefix( handle.first, "Vk" ) }, + { "initializationList", initializationList } } ); +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorTakeOwnership( + std::pair const & handle ) const +{ + std::string handleType = stripPrefix( handle.first, "Vk" ); + std::string paramType; + std::string constructorArguments, dispatcherArgument, initializationList; + if ( handle.first == "VkInstance" ) + { + constructorArguments = "VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Context const & context"; + dispatcherArgument = "context.getDispatcher()"; + } + else + { + assert( !handle.second.constructorIts.empty() && 
!handle.second.constructorIts.front()->second.params.empty() ); + auto param = handle.second.constructorIts.front()->second.params.begin(); + assert( isHandleType( param->type.type ) && param->type.isValue() ); + paramType = stripPrefix( param->type.type, "Vk" ); + constructorArguments = + "VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::" + paramType + " const & " + startLowerCase( paramType ); + dispatcherArgument = startLowerCase( paramType ) + ".getDispatcher()"; + } + if ( ( handle.first == "VkDevice" ) || ( handle.first == "VkInstance" ) ) + { + dispatcherArgument = "*" + dispatcherArgument; + } + std::string handleName = startLowerCase( handleType ); + constructorArguments += ", " + handle.first + " " + handleName; + initializationList += "m_" + handleName + "( " + handleName + " ), "; + if ( handle.second.destructorIt != m_commands.end() ) + { + std::vector params = handle.second.destructorIt->second.params; + auto paramIt = std::find_if( + params.begin(), params.end(), [&paramType]( ParamData const & pd ) { return pd.type.type == "Vk" + paramType; } ); + if ( paramIt != params.end() ) + { + params.erase( paramIt ); + } + constructorArguments += constructRAIIHandleConstructorArguments( handle.first, params, false, true ); + initializationList += + constructRAIIHandleConstructorInitializationList( handle.first, + handle.second.destructorIt, + handle.second.destructorIt, + !handle.second.secondLevelCommands.empty() ); + } + else if ( !handle.second.secondLevelCommands.empty() ) + { + initializationList += "m_" + startLowerCase( paramType ) + "( *" + startLowerCase( paramType ) + " ), "; + } + + std::string dispatcherInit; + if ( ( handle.first == "VkDevice" ) || ( handle.first == "VkInstance" ) ) + { + dispatcherInit = + "\n m_dispatcher.init( m_" + startLowerCase( stripPrefix( handle.first, "Vk" ) ) + " );\n "; + } + + const std::string constructorTemplate = + R"( + ${handleType}( ${constructorArguments} ) + : ${initializationList}m_dispatcher( 
${dispatcherArgument} ) + {${dispatcherInit}} +)"; + + return replaceWithMap( constructorTemplate, + { { "constructorArguments", constructorArguments }, + { "dispatcherArgument", dispatcherArgument }, + { "dispatcherInit", dispatcherInit }, + { "handleType", handleType }, + { "initializationList", initializationList } } ); +} + +std::pair + VulkanHppGenerator::constructRAIIHandleConstructors( std::pair const & handle ) const +{ + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( handle.first, !handle.second.alias.empty() ); + + std::string singularConstructors, arrayConstructors; + for ( auto constructorIt : handle.second.constructorIts ) + { + // there is a non-const parameter with handle type : the to-be-constructed handle + + // check for additional enter/leave guards for the constructors + std::string constructorEnter, constructorLeave; + std::tie( constructorEnter, constructorLeave ) = + generateProtection( constructorIt->second.feature, constructorIt->second.extensions ); + if ( constructorEnter == enter ) + { + constructorEnter.clear(); + constructorLeave.clear(); + } + + std::string arrayConstructor, singularConstructor; + std::tie( singularConstructor, arrayConstructor ) = + constructRAIIHandleConstructor( handle, constructorIt, constructorEnter, constructorLeave ); + arrayConstructors += arrayConstructor; + singularConstructors += singularConstructor; + } + singularConstructors += constructRAIIHandleConstructorTakeOwnership( handle ); + return std::make_pair( singularConstructors, arrayConstructors ); +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorVector( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::vector::const_iterator handleParamIt, + std::string const & enter, + std::string const & leave ) const +{ + std::string vectorSize; + auto lenIt = std::find_if( constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&handleParamIt]( ParamData const & pd ) { 
return pd.name == handleParamIt->len; } ); + if ( lenIt == constructorIt->second.params.end() ) + { + std::vector lenParts = tokenize( handleParamIt->len, "->" ); + assert( lenParts.size() == 2 ); + lenIt = std::find_if( constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&lenParts]( ParamData const & pd ) { return pd.name == lenParts[0]; } ); + assert( lenIt != constructorIt->second.params.end() ); + auto structureIt = m_structures.find( lenIt->type.type ); + assert( structureIt != m_structures.end() ); + assert( std::find_if( structureIt->second.members.begin(), + structureIt->second.members.end(), + [&lenParts]( MemberData const & md ) { return md.name == lenParts[1]; } ) != + structureIt->second.members.end() ); + assert( constructorIt->second.successCodes.size() == 1 ); + vectorSize = startLowerCase( stripPrefix( lenParts[0], "p" ) ) + "." + lenParts[1]; + } + else + { + auto arrayIt = std::find_if( constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&lenIt, &handleParamIt]( ParamData const & pd ) { + return ( pd.len == lenIt->name ) && ( pd.name != handleParamIt->name ); + } ); + assert( arrayIt != constructorIt->second.params.end() ); + vectorSize = startLowerCase( stripPrefix( arrayIt->name, "p" ) ) + ".size()"; + } + + std::string handleConstructorArguments = constructRAIIHandleSingularConstructorArguments( handle, constructorIt ); + std::string handleType = stripPrefix( handle.first, "Vk" ); + std::string successCodePassToElement = ( 1 < constructorIt->second.successCodes.size() ) ? 
"result," : ""; + + const std::string constructorTemplate = + R"( +${enter} ${handleType}s( ${constructorArguments} ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = ${parentName}.getDispatcher(); + std::vector<${vectorElementType}> ${vectorName}( ${vectorSize} ); + VULKAN_HPP_NAMESPACE::Result result = static_cast( dispatcher->${constructorCall}( ${callArguments} ) ); + if ( ${successCheck} ) + { + this->reserve( ${vectorSize} ); + for ( auto const & ${handleName} : ${vectorName} ) + { + this->emplace_back( ${handleConstructorArguments}, ${successCodePassToElement} dispatcher ); + } + } + else + { + throwResultException( result, "${constructorCall}" ); + } + } +${leave})"; + + return replaceWithMap( + constructorTemplate, + { { "callArguments", + constructRAIIHandleConstructorCallArguments( + handle.first, constructorIt->second.params, false, INVALID_INDEX, false ) }, + { "constructorArguments", + constructRAIIHandleConstructorArguments( handle.first, constructorIt->second.params, false, false ) }, + { "constructorCall", constructorIt->first }, + { "enter", enter }, + { "handleConstructorArguments", handleConstructorArguments }, + { "handleName", startLowerCase( handleType ) }, + { "handleType", handleType }, + { "leave", leave }, + { "parentName", constructorIt->second.params.front().name }, + { "successCheck", constructSuccessCheck( constructorIt->second.successCodes ) }, + { "successCodePassToElement", successCodePassToElement }, + { "vectorElementType", handleParamIt->type.type }, + { "vectorName", startLowerCase( stripPrefix( handleParamIt->name, "p" ) ) }, + { "vectorSize", vectorSize } } ); +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorVectorSingular( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::vector::const_iterator handleParamIt, + std::string const & enter, + std::string const & leave ) const +{ + std::string callArguments = + constructRAIIHandleConstructorCallArguments( handle.first, + 
constructorIt->second.params, + false, + std::distance( constructorIt->second.params.begin(), handleParamIt ), + true ); + std::string initializationList = constructRAIIHandleConstructorInitializationList( + handle.first, constructorIt, handle.second.destructorIt, !handle.second.secondLevelCommands.empty() ); + std::string failureCheck = constructFailureCheck( constructorIt->second.successCodes ); + replaceAll( failureCheck, "result", "m_constructorSuccessCode" ); + + const std::string singularConstructorTemplate = + R"( +${enter} ${handleType}( ${constructorArguments} ) + : ${initializationList}m_dispatcher( ${firstArgument}.getDispatcher() ) + { + m_constructorSuccessCode = static_cast( getDispatcher()->${constructorCall}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( m_constructorSuccessCode, "${constructorCall}" ); + } + } +${leave})"; + + return replaceWithMap( + singularConstructorTemplate, + { { "initializationList", initializationList }, + { "callArguments", callArguments }, + { "constructorArguments", + constructRAIIHandleConstructorArguments( handle.first, constructorIt->second.params, true, false ) }, + { "constructorCall", constructorIt->first }, + { "enter", enter }, + { "firstArgument", constructorIt->second.params[0].name }, + { "failureCheck", failureCheck }, + { "leave", leave }, + { "handleType", stripPrefix( handle.first, "Vk" ) } } ); +} + +std::string VulkanHppGenerator::constructRAIIHandleConstructorVoid( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::string const & enter, + std::string const & leave ) const +{ + std::string callArguments = constructRAIIHandleConstructorCallArguments( + handle.first, constructorIt->second.params, false, INVALID_INDEX, true ); + std::string constructorArguments = + constructRAIIHandleConstructorArguments( handle.first, constructorIt->second.params, false, false ); + std::string initializationList = constructRAIIHandleConstructorInitializationList( + 
handle.first, constructorIt, handle.second.destructorIt, !handle.second.secondLevelCommands.empty() ); + + const std::string constructorTemplate = + R"( +${enter} ${handleType}( ${constructorArguments} ) + : ${initializationList}m_dispatcher( ${firstArgument}.getDispatcher() ) + { + getDispatcher()->${constructorCall}( ${callArguments} ); + } +${leave})"; + + return replaceWithMap( constructorTemplate, + { { "callArguments", callArguments }, + { "constructorArguments", constructorArguments }, + { "constructorCall", constructorIt->first }, + { "enter", enter }, + { "firstArgument", constructorIt->second.params[0].name }, + { "leave", leave }, + { "handleType", stripPrefix( handle.first, "Vk" ) }, + { "initializationList", initializationList } } ); +} + +std::pair + VulkanHppGenerator::constructRAIIHandleDestructor( std::string const & handleType, + std::map::const_iterator destructorIt, + std::string const & enter ) const +{ + std::string destructorEnter, destructorLeave; + std::tie( destructorEnter, destructorLeave ) = + generateProtection( destructorIt->second.feature, destructorIt->second.extensions ); + bool doProtect = !destructorEnter.empty() && ( destructorEnter != enter ); + if ( !doProtect ) + { + destructorEnter.clear(); + destructorLeave.clear(); + } + std::string destructorCall = destructorIt->first + "( " + + constructRAIIHandleDestructorCallArguments( handleType, destructorIt->second.params ) + + " )"; + + const std::string destructorTemplate = R"( +${enter}~${handleType}() + { + if ( m_${handleName} ) + { + getDispatcher()->${destructorCall}; + } + } +${leave})"; + + std::string destructor = replaceWithMap( destructorTemplate, + { { "destructorCall", destructorCall }, + { "enter", destructorEnter }, + { "handleName", startLowerCase( stripPrefix( handleType, "Vk" ) ) }, + { "handleType", stripPrefix( handleType, "Vk" ) }, + { "leave", destructorLeave } } ); + return std::make_pair( destructor, destructorCall ); +} + +std::string + 
VulkanHppGenerator::constructRAIIHandleDestructorCallArguments( std::string const & handleType, + std::vector const & params ) const +{ + std::string arguments; + bool encounteredArgument = false; + for ( auto param : params ) + { + if ( encounteredArgument ) + { + arguments += ", "; + } + if ( param.type.type == "VkAllocationCallbacks" ) + { + // VkAllocationCallbacks is stored as a member of the handle class + arguments += "m_allocator"; + } + else if ( isHandleType( param.type.type ) ) + { + assert( param.arraySizes.empty() ); + std::string argument = "m_" + startLowerCase( stripPrefix( param.type.type, "Vk" ) ); + if ( param.type.isValue() ) + { + if ( param.type.type == handleType ) + { + argument = "static_cast<" + param.type.type + ">( " + argument + " )"; + } + arguments += argument; + } + else + { + assert( param.type.isConstPointer() ); + assert( !param.len.empty() && ( std::find_if( params.begin(), params.end(), [&param]( ParamData const & pd ) { + return pd.name == param.len; + } ) != params.end() ) ); + arguments += "reinterpret_cast<" + param.type.type + " const *>( &" + argument + " )"; + } + } + else + { + assert( ( param.type.type == "uint32_t" ) && param.type.isValue() && param.arraySizes.empty() && + param.len.empty() && !param.optional ); + assert( std::find_if( params.begin(), params.end(), [&param]( ParamData const & pd ) { + return pd.len == param.name; + } ) != params.end() ); + arguments += "1"; + } + encounteredArgument = true; + } + return arguments; +} + +std::tuple + VulkanHppGenerator::constructRAIIHandleDetails( std::pair const & handle, + std::string const & destructorCall ) const +{ + std::string getConstructorSuccessCode, memberVariables, moveConstructorInitializerList, moveAssignmentInstructions; + bool multiSuccessCodeContructor = isMultiSuccessCodeConstructor( handle.second.constructorIts ); + if ( multiSuccessCodeContructor ) + { + getConstructorSuccessCode = R"( + VULKAN_HPP_NAMESPACE::Result getConstructorSuccessCode() const + { 
return m_constructorSuccessCode; + } +)"; + } + + std::string handleType = stripPrefix( handle.first, "Vk" ); + std::string handleName = startLowerCase( handleType ); + + memberVariables = " VULKAN_HPP_NAMESPACE::" + handleType + " m_" + handleName + ";"; + moveConstructorInitializerList = + "m_" + handleName + "( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_" + handleName + ", {} ) )"; + moveAssignmentInstructions = " m_" + handleName + + " = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_" + handleName + + ", {} );"; + if ( handle.second.destructorIt != m_commands.end() ) + { + moveAssignmentInstructions = " getDispatcher()->" + destructorCall + ";\n" + moveAssignmentInstructions; + for ( auto const & destructorParam : handle.second.destructorIt->second.params ) + { + if ( ( destructorParam.type.type != handle.first ) && + ( std::find_if( handle.second.destructorIt->second.params.begin(), + handle.second.destructorIt->second.params.end(), + [&destructorParam]( ParamData const & pd ) { return pd.len == destructorParam.name; } ) == + handle.second.destructorIt->second.params.end() ) ) + { + std::string name = destructorParam.name; + if ( !destructorParam.type.isValue() ) + { + name = startLowerCase( stripPrefix( name, "p" ) ); + } + memberVariables += "\n " + destructorParam.type.prefix + " " + destructorParam.type.type + " " + + destructorParam.type.postfix + " m_" + name + ";"; + moveConstructorInitializerList += ", m_" + name + "( rhs.m_" + name + " )"; + moveAssignmentInstructions += "\n m_" + name + " = rhs.m_" + name + ";"; + } + } + } + else if ( !handle.second.secondLevelCommands.empty() ) + { + assert( !handle.second.constructorIts.empty() ); + assert( !handle.second.constructorIts.front()->second.params.empty() ); + auto const & parentType = handle.second.constructorIts.front()->second.params.front().type; + assert( isHandleType( parentType.type ) ); + memberVariables += "\n VULKAN_HPP_NAMESPACE::" + stripPrefix( 
parentType.type, "Vk" ) + " m_" + + startLowerCase( stripPrefix( parentType.type, "Vk" ) ) + ";"; + } + if ( multiSuccessCodeContructor ) + { + memberVariables += "\n VULKAN_HPP_NAMESPACE::Result m_constructorSuccessCode;"; + } + memberVariables += ( ( handle.first == "VkDevice" ) || ( handle.first == "VkInstance" ) ) + ? "\n VULKAN_HPP_RAII_DISPATCHER_TYPE m_dispatcher;" + : "\n VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher;"; + moveConstructorInitializerList += ", m_dispatcher( rhs.m_dispatcher )"; + moveAssignmentInstructions += "\n m_dispatcher = rhs.m_dispatcher;"; + return std::make_tuple( + getConstructorSuccessCode, memberVariables, moveConstructorInitializerList, moveAssignmentInstructions ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultEnumerate( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string commandName = determineCommandName( + commandIt->first, initialSkipCount ? 
commandIt->second.params[initialSkipCount - 1].type.type : "", m_tags ); + std::string counterName = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->second].name, "p" ) ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string firstCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, true, INVALID_INDEX, true ); + std::string secondCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string vectorElementType = commandIt->second.params[vectorParamIndices.begin()->first].type.type; + if ( !isHandleType( vectorElementType ) ) + { + assert( commandIt->second.params[vectorParamIndices.begin()->first].type.isNonConstPointer() ); + vectorElementType = + ( vectorElementType == "void" ) + ? "uint8_t" + : stripPostfix( commandIt->second.params[vectorParamIndices.begin()->first].type.compose(), "*" ); + } + std::string vectorName = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->first].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::vector<${vectorElementType}> ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "vectorElementType", vectorElementType }, + } ); + + const std::string definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::vector<${vectorElementType}> ${className}::${commandName}( ${argumentList} ) const + { + std::vector<${vectorElementType}> ${vectorName}; + ${counterType} ${counterName}; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->${vkCommand}( ${firstCallArguments} ) ); + if ( 
( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ${counterName} ) + { + ${vectorName}.resize( ${counterName} ); + result = static_cast( getDispatcher()->${vkCommand}( ${secondCallArguments} ) ); + VULKAN_HPP_ASSERT( ${counterName} <= ${vectorName}.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( ${counterName} < ${vectorName}.size() ) ) + { + ${vectorName}.resize( ${counterName} ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return ${vectorName}; + } +${leave})"; + + std::string definition = replaceWithMap( + definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "className", + initialSkipCount ? stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) : "Context" }, + { "commandName", commandName }, + { "counterName", counterName }, + { "counterType", commandIt->second.params[vectorParamIndices.begin()->second].type.type }, + { "enter", enter }, + { "firstCallArguments", firstCallArguments }, + { "leave", leave }, + { "secondCallArguments", secondCallArguments }, + { "vectorElementType", vectorElementType }, + { "vectorName", vectorName }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultEnumerateTwoVectors( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); 
+ std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + auto firstVectorParamIt = vectorParamIndices.begin(); + auto secondVectorParamIt = std::next( firstVectorParamIt ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string counterName = + startLowerCase( stripPrefix( stripPluralS( commandIt->second.params[firstVectorParamIt->second].name ), "p" ) ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string firstCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, true, INVALID_INDEX, true ); + std::string firstType = stripPrefix( commandIt->second.params[firstVectorParamIt->first].type.type, "Vk" ); + std::string firstVectorName = + startLowerCase( stripPrefix( commandIt->second.params[firstVectorParamIt->first].name, "p" ) ); + std::string secondCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string secondType = stripPrefix( commandIt->second.params[secondVectorParamIt->first].type.type, "Vk" ); + std::string secondVectorName = + startLowerCase( stripPrefix( commandIt->second.params[secondVectorParamIt->first].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::pair, std::vector<${secondType}>> ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "firstType", firstType }, + { "leave", leave }, + { "secondType", secondType } } ); + + const std::string definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::pair, 
std::vector<${secondType}>> ${className}::${commandName}( ${argumentList} ) const + { + std::pair, std::vector<${secondType}>> data; + std::vector<${firstType}> & ${firstVectorName} = data.first; + std::vector<${secondType}> & ${secondVectorName} = data.second; + ${counterType} ${counterName}; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->${vkCommand}( ${firstCallArguments} ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ${counterName} ) + { + ${firstVectorName}.resize( ${counterName} ); + ${secondVectorName}.resize( ${counterName} ); + result = static_cast( getDispatcher()->${vkCommand}( ${secondCallArguments} ) ); + VULKAN_HPP_ASSERT( ${counterName} <= ${firstVectorName}.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( ${counterName} < ${firstVectorName}.size() ) ) + { + ${firstVectorName}.resize( ${counterName} ); + ${secondVectorName}.resize( ${counterName} ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return data; + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "counterName", counterName }, + { "counterType", commandIt->second.params[firstVectorParamIt->second].type.type }, + { "enter", enter }, + { "firstCallArguments", firstCallArguments }, + { "firstType", firstType }, + { "firstVectorName", firstVectorName }, + { "leave", leave }, + { "secondCallArguments", secondCallArguments }, + { "secondType", secondType }, + { "secondVectorName", secondVectorName }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair 
VulkanHppGenerator::constructRAIIHandleMemberFunctionResultMulti( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const +{ + std::set skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? 
constructRAIIHandleVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result ${className}::${commandName}( ${argumentList} ) const + {${vectorSizeCheck} + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return result; + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultMultiGetValue( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = 
constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType = stripPostfix( commandIt->second.params[nonConstPointerParamIndices[0]].type.compose(), "*" ); + std::string valueName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::pair ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnType", returnType }, + } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::pair ${className}::${commandName}( ${argumentList} ) const + { + ${returnType} ${valueName}; + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return std::make_pair( result, ${valueName} ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( 
commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "valueName", valueName }, + { "returnType", returnType }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultMultiGetVectorOfVoid( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string dataName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::string const declarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::pair> ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { 
"leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::pair> ${className}::${commandName}( ${argumentList} ) const + { + VULKAN_HPP_ASSERT( ${dataSize} % sizeof( T ) == 0 ); + std::vector ${dataName}( ${dataSize} / sizeof( T ) ); + Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return std::make_pair( result, ${dataName} ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "dataName", dataName }, + { "dataSize", commandIt->second.params[nonConstPointerParamIndices[0]].len }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair + VulkanHppGenerator::constructRAIIHandleMemberFunctionResultMultiGetVectorOfVoidSingular( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, true ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, nonConstPointerParamIndices[0], false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, nonConstPointerParamIndices[0], true, false, false, false ); + std::string callArguments = 
constructCallArgumentsEnhanced( + commandIt->second.params, initialSkipCount, false, nonConstPointerParamIndices[0], true ); + std::string commandName = stripPluralS( + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ) ); + std::string dataName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::string const singularDeclarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::pair ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( singularDeclarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const singularDefinitionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::pair ${className}::${commandName}( ${argumentList} ) const + { + T ${dataName}; + Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return std::make_pair( result, ${dataName} ); + } +${leave})"; + + std::string definition = + replaceWithMap( singularDefinitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "dataName", dataName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair 
VulkanHppGenerator::constructRAIIHandleMemberFunctionResultMultiNoErrors( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const +{ + std::set skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? 
constructRAIIHandleVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result ${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result ${className}::${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT + {${vectorSizeCheck} + return static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingle( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const +{ + std::set skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = 
+ constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? constructRAIIHandleVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string const declarationTemplate = + R"( +${enter} void ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_INLINE void ${className}::${commandName}( ${argumentList} ) const + {${vectorSizeCheck} + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair 
VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingleGetChain( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType = stripPostfix( commandIt->second.params[nonConstPointerParamIndices[0]].type.compose(), "*" ); + std::string returnVariable = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD StructureChain ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const functionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD StructureChain ${className}::${commandName}( ${argumentList} ) const + { + StructureChain structureChain; + ${returnType} & ${returnVariable} = 
structureChain.template get<${returnType}>(); + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return structureChain; + } +${leave})"; + + std::string definition = + replaceWithMap( functionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "returnVariable", returnVariable }, + { "returnType", returnType }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingleGetVectorAndValue( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( 
commandIt->second.feature, commandIt->second.extensions ); + std::string valueName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[1]].name, "p" ) ); + std::string valueType = commandIt->second.params[nonConstPointerParamIndices[1]].type.type; + std::string vectorElementType = commandIt->second.params[nonConstPointerParamIndices[0]].type.type; + std::string vectorName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + std::string vectorSize = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->first].name, "p" ) ) + ".size()"; + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::pair, ${valueType}> ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "valueType", valueType }, + { "vectorElementType", vectorElementType }, + } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::pair, ${valueType}> ${className}::${commandName}( ${argumentList} ) const + { + std::pair, ${valueType}> data( std::piecewise_construct, std::forward_as_tuple( ${vectorSize} ), std::forward_as_tuple( 0 ) ); + std::vector<${vectorElementType}> & ${vectorName} = data.first; + ${valueType} & ${valueName} = data.second; + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return data; + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 
1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "valueName", valueName }, + { "valueType", valueType }, + { "vectorElementType", vectorElementType }, + { "vectorName", vectorName }, + { "vectorSize", vectorSize }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingleGetVectorOfVoid( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string dataName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::string const declarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::vector ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", 
argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::vector ${className}::${commandName}( ${argumentList} ) const + { + VULKAN_HPP_ASSERT( ${dataSize} % sizeof( T ) == 0 ); + std::vector ${dataName}( ${dataSize} / sizeof( T ) ); + Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return ${dataName}; + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "dataName", dataName }, + { "dataSize", commandIt->second.params[nonConstPointerParamIndices[0]].len }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair + VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingleGetVectorOfVoidSingular( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, true ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, nonConstPointerParamIndices[0], false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, 
nonConstPointerParamIndices[0], true, false, false, false ); + std::string callArguments = constructCallArgumentsEnhanced( + commandIt->second.params, initialSkipCount, false, nonConstPointerParamIndices[0], true ); + std::string commandName = stripPluralS( + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ) ); + std::string dataName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::string const singularDeclarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD T ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( singularDeclarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const singularDefinitionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD T ${className}::${commandName}( ${argumentList} ) const + { + T ${dataName}; + Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return ${dataName}; + } +${leave})"; + + std::string definition = + replaceWithMap( singularDefinitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "dataName", dataName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair 
VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingleNoErrors( + std::map<std::string, CommandData>::const_iterator commandIt, + size_t initialSkipCount, + std::map<size_t, size_t> const & vectorParamIndices ) const +{ + std::set<size_t> skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + + std::pair<bool, std::map<size_t, std::vector<size_t>>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? 
constructRAIIHandleVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string const declarationTemplate = + R"( +${enter} void ${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_INLINE void ${className}::${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT + {${vectorSizeCheck} + getDispatcher()->${vkCommand}( ${callArguments} ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair<std::string, std::string> VulkanHppGenerator::constructRAIIHandleMemberFunctionResultSingleGetValue( + std::map<std::string, CommandData>::const_iterator commandIt, + size_t initialSkipCount, + std::map<size_t, size_t> const & vectorParamIndices, + std::vector<size_t> const & nonConstPointerParamIndices ) const +{ + assert( nonConstPointerParamIndices.size() == 1 ); + + std::set<size_t> skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + 
std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = determineCommandName( + commandIt->first, initialSkipCount ? commandIt->second.params[initialSkipCount - 1].type.type : "", m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType = stripPostfix( commandIt->second.params[nonConstPointerParamIndices[0]].type.compose(), "*" ); + std::string valueName = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnType", returnType }, + } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${className}::${commandName}( ${argumentList} ) const + { + ${returnType} ${valueName}; + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->${vkCommand}( ${callArguments} ) ); + if ( ${failureCheck} ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING"::${className}::${commandName}" ); + } + return ${valueName}; + } +${leave})"; + + std::string definition = replaceWithMap( + definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", + initialSkipCount ? 
stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) : "Context" }, + { "commandName", commandName }, + { "enter", enter }, + { "failureCheck", constructFailureCheck( commandIt->second.successCodes ) }, + { "leave", leave }, + { "valueName", valueName }, + { "returnType", returnType }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +void VulkanHppGenerator::constructRAIIHandleMemberFunction( std::string & functionDeclarations, + std::string & functionDefinitions, + std::string const & command, + size_t initialSkipCount, + std::set const & specialFunctions ) const +{ + if ( specialFunctions.find( command ) == specialFunctions.end() ) + { + bool commandConstructed = false; + auto commandIt = m_commands.find( command ); + assert( commandIt != m_commands.end() ); + + std::map vectorParamIndices = determineVectorParamIndicesNew( commandIt->second.params ); + std::vector nonConstPointerParamIndices = determineNonConstPointerParamIndices( commandIt->second.params ); + + std::string declaration, definition; + switch ( nonConstPointerParamIndices.size() ) + { + case 0: + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( commandIt->second.errorCodes.empty() ) + { + if ( commandIt->second.successCodes.size() == 1 ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultSingleNoErrors( + commandIt, initialSkipCount, vectorParamIndices ); + } + else + { + std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionResultMultiNoErrors( commandIt, initialSkipCount, vectorParamIndices ); + } + } + else + { + if ( commandIt->second.successCodes.size() == 1 ) + { + std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionResultSingle( commandIt, initialSkipCount, vectorParamIndices ); + } + else + { + 
std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionResultMulti( commandIt, initialSkipCount, vectorParamIndices ); + } + } + } + else + { + // as the return type is not "VkResult", there are no success or error codes allowed + assert( commandIt->second.successCodes.empty() && commandIt->second.errorCodes.empty() ); + if ( commandIt->second.returnType == "void" ) + { + std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionVoid( commandIt, initialSkipCount, vectorParamIndices ); + } + else if ( beginsWith( commandIt->second.returnType, "Vk" ) ) + { + std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionVkType( commandIt, initialSkipCount, vectorParamIndices ); + } + else + { + std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionType( commandIt, initialSkipCount, vectorParamIndices ); + } + } + commandConstructed = true; + break; + case 1: + // one return parameter (and it's not a handle, that would be a constructor) + assert( !isHandleType( commandIt->second.params[nonConstPointerParamIndices[0]].type.type ) ); + { + auto returnVectorParamIt = vectorParamIndices.find( nonConstPointerParamIndices[0] ); + if ( returnVectorParamIt == vectorParamIndices.end() ) + { + // the return parameter is not a vector -> return a value + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( !commandIt->second.errorCodes.empty() ) + { + if ( commandIt->second.successCodes.size() == 1 ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultSingleGetValue( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + } + else + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultMultiGetValue( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + } + 
commandConstructed = true; + } + } + else + { + // as the return type is not "VkResult", there are no success or error codes allowed + assert( commandIt->second.successCodes.empty() && commandIt->second.errorCodes.empty() ); + if ( commandIt->second.returnType == "void" ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionVoidGetValue( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + commandConstructed = true; + } + } + } + else + { + // the return parameter is a vector -> return a vector + if ( ( commandIt->second.params[returnVectorParamIt->first].type.type == "void" ) && + ( commandIt->second.params[returnVectorParamIt->second].type.isValue() ) ) + { + // the return parameter is of type void, and the size is given by value + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( !commandIt->second.errorCodes.empty() ) + { + if ( commandIt->second.successCodes.size() == 1 ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultSingleGetVectorOfVoid( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + + std::string singularDeclaration, singularDefinition; + std::tie( singularDeclaration, singularDefinition ) = + constructRAIIHandleMemberFunctionResultSingleGetVectorOfVoidSingular( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + declaration += singularDeclaration; + definition += singularDefinition; + } + else + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultMultiGetVectorOfVoid( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + + std::string singularDeclaration, singularDefinition; + std::tie( singularDeclaration, singularDefinition ) = + constructRAIIHandleMemberFunctionResultMultiGetVectorOfVoidSingular( 
+ commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + declaration += singularDeclaration; + definition += singularDefinition; + } + commandConstructed = true; + } + } + } + } + if ( isStructureChainAnchor( commandIt->second.params[nonConstPointerParamIndices[0]].type.type ) ) + { + // for StructureChain returns, add functions returning a StructureChain + commandConstructed = false; + if ( returnVectorParamIt == vectorParamIndices.end() ) + { + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( !commandIt->second.errorCodes.empty() ) + { + if ( commandIt->second.successCodes.size() == 1 ) + { + std::string chainDeclaration, chainDefinition; + std::tie( chainDeclaration, chainDefinition ) = + constructRAIIHandleMemberFunctionResultSingleGetChain( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + declaration += chainDeclaration; + definition += chainDefinition; + commandConstructed = true; + } + } + } + else + { + // as the return type is not "VkResult", there are no success or error codes allowed + assert( commandIt->second.successCodes.empty() && commandIt->second.errorCodes.empty() ); + if ( commandIt->second.returnType == "void" ) + { + std::string chainDeclaration, chainDefinition; + std::tie( chainDeclaration, chainDefinition ) = constructRAIIHandleMemberFunctionVoidGetChain( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + declaration += chainDeclaration; + definition += chainDefinition; + commandConstructed = true; + } + } + } + } + } + break; + case 2: + // two return parameters (and the first one is not a handle, that would be a constructor) + assert( !isHandleType( commandIt->second.params[nonConstPointerParamIndices[0]].type.type ) ); + switch ( vectorParamIndices.size() ) + { + case 1: + { + // two returns but just 
one vector + auto vectorParamIndexIt = vectorParamIndices.begin(); + if ( ( vectorParamIndexIt->second == nonConstPointerParamIndices[0] ) && + ( vectorParamIndexIt->first == nonConstPointerParamIndices[1] ) ) + { + // the size is a return value as well -> enumerate the values + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( !commandIt->second.errorCodes.empty() ) + { + if ( ( commandIt->second.successCodes.size() == 2 ) && + ( commandIt->second.successCodes[0] == "VK_SUCCESS" ) && + ( commandIt->second.successCodes[1] == "VK_INCOMPLETE" ) ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultEnumerate( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + commandConstructed = true; + } + } + } + else + { + if ( commandIt->second.returnType == "void" ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionVoidEnumerate( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + commandConstructed = true; + } + } + } + } + break; + case 2: + // two returns and two vectors + if ( ( vectorParamIndices.find( nonConstPointerParamIndices[0] ) != vectorParamIndices.end() ) && + ( vectorParamIndices.find( nonConstPointerParamIndices[1] ) == vectorParamIndices.end() ) ) + { + // two returns and two vectors! 
But one input vector, one output vector of the same size, and one output + // value + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( !commandIt->second.errorCodes.empty() ) + { + if ( commandIt->second.successCodes.size() == 1 ) + { + std::tie( declaration, definition ) = + constructRAIIHandleMemberFunctionResultSingleGetVectorAndValue( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + commandConstructed = true; + } + } + } + } + break; + } + if ( isStructureChainAnchor( commandIt->second.params[nonConstPointerParamIndices[0]].type.type ) ) + { + commandConstructed = false; + } + if ( isStructureChainAnchor( commandIt->second.params[nonConstPointerParamIndices[1]].type.type ) ) + { + commandConstructed = false; + if ( vectorParamIndices.size() == 1 ) + { + // two returns but just one vector + auto vectorParamIndexIt = vectorParamIndices.begin(); + if ( ( vectorParamIndexIt->second == nonConstPointerParamIndices[0] ) && + ( vectorParamIndexIt->first == nonConstPointerParamIndices[1] ) ) + { + // the size is a return value as well -> enumerate the values + std::string vectorElementType = commandIt->second.params[vectorParamIndices.begin()->first].type.type; + if ( commandIt->second.returnType == "void" ) + { + std::string chainDeclaration, chainDefinition; + std::tie( chainDeclaration, chainDefinition ) = constructRAIIHandleMemberFunctionVoidEnumerateChain( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + declaration += chainDeclaration; + definition += chainDefinition; + commandConstructed = true; + } + } + } + } + break; + case 3: + // three return parameters + if ( vectorParamIndices.size() == 2 ) + { + // two vector parameters + auto firstVectorParamIt = vectorParamIndices.begin(); + auto secondVectorParamIt = std::next( firstVectorParamIt ); + if ( ( 
firstVectorParamIt->second == nonConstPointerParamIndices[0] ) && + ( firstVectorParamIt->first == nonConstPointerParamIndices[1] ) && + ( secondVectorParamIt->first == nonConstPointerParamIndices[2] ) && + ( firstVectorParamIt->second == secondVectorParamIt->second ) ) + { + // the two vectors use the very same size parameter + // both vectors, as well as the size parameter are non-const pointer that is output parameters + assert( commandIt->second.params[firstVectorParamIt->first].type.isNonConstPointer() && + commandIt->second.params[secondVectorParamIt->first].type.isNonConstPointer() && + commandIt->second.params[firstVectorParamIt->second].type.isNonConstPointer() ); + // the size is a return value as well -> enumerate the values + if ( commandIt->second.returnType == "VkResult" ) + { + // as the returnType is "VkResult", there has to be at least one success code + assert( !commandIt->second.successCodes.empty() ); + if ( !commandIt->second.errorCodes.empty() ) + { + if ( ( commandIt->second.successCodes.size() == 2 ) && + ( commandIt->second.successCodes[0] == "VK_SUCCESS" ) && + ( commandIt->second.successCodes[1] == "VK_INCOMPLETE" ) ) + { + std::tie( declaration, definition ) = constructRAIIHandleMemberFunctionResultEnumerateTwoVectors( + commandIt, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices ); + commandConstructed = true; + } + } + } + } + } + if ( isStructureChainAnchor( commandIt->second.params[nonConstPointerParamIndices[0]].type.type ) || + isStructureChainAnchor( commandIt->second.params[nonConstPointerParamIndices[1]].type.type ) || + isStructureChainAnchor( commandIt->second.params[nonConstPointerParamIndices[2]].type.type ) ) + { + commandConstructed = true; + } + break; + } + functionDeclarations += declaration; + functionDefinitions += definition; + if ( !commandConstructed ) + { + throw std::runtime_error( "Never encountered a function like <" + commandIt->first + "> !" 
); + } + } +} + +std::pair + VulkanHppGenerator::constructRAIIHandleMemberFunctions( std::pair const & handle, + std::set const & specialFunctions ) const +{ + std::string functionDeclarations, functionDefinitions; + for ( auto const & command : handle.second.commands ) + { + constructRAIIHandleMemberFunction( + functionDeclarations, functionDefinitions, command, handle.first.empty() ? 0 : 1, specialFunctions ); + } + for ( auto const & command : handle.second.secondLevelCommands ) + { + assert( !handle.first.empty() ); + constructRAIIHandleMemberFunction( functionDeclarations, functionDefinitions, command, 2, specialFunctions ); + } + return std::make_pair( functionDeclarations, functionDefinitions ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionType( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const +{ + std::set skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType = namespacedType( commandIt->second.returnType ); + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? 
constructVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnType", returnType }, + } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${className}::${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT + {${vectorSizeCheck} + return getDispatcher()->${vkCommand}( ${callArguments} ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnType", returnType }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionVkType( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const +{ + std::set skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + 
constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType = namespacedType( commandIt->second.returnType ); + + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? constructRAIIHandleVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnType", returnType }, + } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ${returnType} ${className}::${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT + {${vectorSizeCheck} + return static_cast<${returnType}>( getDispatcher()->${vkCommand}( ${callArguments} ) ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnType", returnType }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair 
VulkanHppGenerator::constructRAIIHandleMemberFunctionVoid( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const +{ + assert( commandIt->second.successCodes.empty() && commandIt->second.errorCodes.empty() ); + std::set skippedParameters = + determineSkippedParams( commandIt->second.params, initialSkipCount, vectorParamIndices, {}, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string noexceptString = vectorSizeCheck.first ? "VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS" : "VULKAN_HPP_NOEXCEPT"; + std::string vectorSizeCheckString = + vectorSizeCheck.first + ? constructVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : ""; + + std::string templateString = ( ( vectorParamIndices.size() == 1 ) && + ( commandIt->second.params[vectorParamIndices.begin()->first].type.type == "void" ) ) + ? 
"template \n" + : ""; + + std::string const declarationTemplate = + R"( +${enter} ${template}void ${commandName}( ${argumentList} ) const ${noexcept}; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "noexcept", noexceptString }, + { "template", templateString } } ); + + std::string const definitionTemplate = + R"( +${enter} ${template}VULKAN_HPP_INLINE void ${className}::${commandName}( ${argumentList} ) const ${noexcept} + {${vectorSizeCheck} + getDispatcher()->${vkCommand}( ${callArguments} ); + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "noexcept", noexceptString }, + { "template", templateString }, + { "vectorSizeCheck", vectorSizeCheckString }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionVoidEnumerate( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + assert( nonConstPointerParamIndices.size() == 2 ); + assert( vectorParamIndices.size() == 1 ); + + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, 
skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string vectorElementType = + stripPostfix( commandIt->second.params[vectorParamIndices.begin()->first].type.compose(), "*" ); + std::string counterName = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->second].name, "p" ) ); + std::string vectorName = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->first].name, "p" ) ); + std::string firstCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, true, INVALID_INDEX, true ); + std::string secondCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::vector<${vectorElementType}> ${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "vectorElementType", vectorElementType } } ); + + const std::string definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD std::vector<${vectorElementType}> ${className}::${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT + { + ${counterType} ${counterName}; + getDispatcher()->${vkCommand}( ${firstCallArguments} ); + std::vector<${vectorElementType}> ${vectorName}( ${counterName} ); + getDispatcher()->${vkCommand}( ${secondCallArguments} ); + VULKAN_HPP_ASSERT( ${counterName} <= ${vectorName}.size() ); + return ${vectorName}; + } +${leave})"; + + std::string definition 
= + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "counterName", counterName }, + { "counterType", commandIt->second.params[vectorParamIndices.begin()->second].type.type }, + { "enter", enter }, + { "firstCallArguments", firstCallArguments }, + { "leave", leave }, + { "secondCallArguments", secondCallArguments }, + { "vectorElementType", vectorElementType }, + { "vectorName", vectorName }, + { "vkCommand", commandIt->first } } ); + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionVoidEnumerateChain( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string counterName = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->second].name, "p" ) ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string firstCallArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, true, INVALID_INDEX, true ); + std::string secondCallArguments = + 
constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string vectorElementType = + stripPostfix( commandIt->second.params[vectorParamIndices.begin()->first].type.compose(), "*" ); + std::string vectorName = + startLowerCase( stripPrefix( commandIt->second.params[vectorParamIndices.begin()->first].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::vector ${commandName}( ${argumentList} ) const; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave } } ); + + const std::string definitionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD std::vector ${className}::${commandName}( ${argumentList} ) const + { + ${counterType} ${counterName}; + getDispatcher()->${vkCommand}( ${firstCallArguments} ); + std::vector returnVector( ${counterName} ); + std::vector<${vectorElementType}> ${vectorName}( ${counterName} ); + for ( ${counterType} i = 0; i < ${counterName}; i++ ) + { + ${vectorName}[i].pNext = returnVector[i].template get<${vectorElementType}>().pNext; + } + getDispatcher()->${vkCommand}( ${secondCallArguments} ); + VULKAN_HPP_ASSERT( ${counterName} <= ${vectorName}.size() ); + for ( ${counterType} i = 0; i < ${counterName}; i++ ) + { + returnVector[i].template get<${vectorElementType}>() = ${vectorName}[i]; + } + return returnVector; + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "counterName", counterName }, + { "counterType", commandIt->second.params[vectorParamIndices.begin()->second].type.type }, + { "enter", enter }, + { "firstCallArguments", firstCallArguments }, + { 
"leave", leave }, + { "secondCallArguments", secondCallArguments }, + { "vectorElementType", vectorElementType }, + { "vectorName", vectorName }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionVoidGetChain( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string callArguments = + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType = stripPostfix( commandIt->second.params[nonConstPointerParamIndices[0]].type.compose(), "*" ); + std::string returnVariable = + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ); + + std::string const declarationTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD StructureChain ${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { 
"leave", leave } } ); + + std::string const definitionTemplate = + R"( +${enter} template + VULKAN_HPP_NODISCARD StructureChain ${className}::${commandName}( ${argumentList} ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + ${returnType} & ${returnVariable} = structureChain.template get<${returnType}>(); + getDispatcher()->${vkCommand}( ${callArguments} ); + return structureChain; + } +${leave})"; + + std::string definition = + replaceWithMap( definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", callArguments }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "returnVariable", returnVariable }, + { "returnType", returnType }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::pair VulkanHppGenerator::constructRAIIHandleMemberFunctionVoidGetValue( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const +{ + assert( nonConstPointerParamIndices.size() == 1 ); + + std::set skippedParameters = determineSkippedParams( + commandIt->second.params, initialSkipCount, vectorParamIndices, nonConstPointerParamIndices, false ); + std::string argumentListDeclaration = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, false, false, false, false ); + std::string argumentListDefinition = constructArgumentListEnhanced( + commandIt->second.params, skippedParameters, INVALID_INDEX, true, false, false, false ); + std::string commandName = + determineCommandName( commandIt->first, commandIt->second.params[initialSkipCount - 1].type.type, m_tags ); + std::string enter, leave; + std::tie( enter, leave ) = generateProtection( commandIt->second.feature, commandIt->second.extensions ); + std::string returnType 
= stripPostfix( commandIt->second.params[nonConstPointerParamIndices[0]].type.compose(), "*" ); + std::pair>> vectorSizeCheck = needsVectorSizeCheck( vectorParamIndices ); + std::string noexceptString = vectorSizeCheck.first ? "VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS" : "VULKAN_HPP_NOEXCEPT"; + + std::string const declarationTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${commandName}( ${argumentList} ) const ${noexcept}; +${leave})"; + + std::string declaration = replaceWithMap( declarationTemplate, + { + { "argumentList", argumentListDeclaration }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "noexcept", noexceptString }, + { "returnType", returnType }, + } ); + + std::string const definitionTemplate = + R"( +${enter} VULKAN_HPP_NODISCARD ${returnType} ${className}::${commandName}( ${argumentList} ) const ${noexcept} + {${vectorSizeCheck} + ${returnType} ${returnVariable}; + getDispatcher()->${vkCommand}( ${callArguments} ); + return ${returnVariable}; + } +${leave})"; + + std::string definition = replaceWithMap( + definitionTemplate, + { { "argumentList", argumentListDefinition }, + { "callArguments", + constructCallArgumentsEnhanced( commandIt->second.params, initialSkipCount, false, INVALID_INDEX, true ) }, + { "className", stripPrefix( commandIt->second.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "enter", enter }, + { "leave", leave }, + { "noexcept", noexceptString }, + { "vectorSizeCheck", + vectorSizeCheck.first + ? 
constructVectorSizeCheck( + commandIt->first, commandIt->second, initialSkipCount, vectorSizeCheck.second, skippedParameters ) + : "" }, + { "returnType", returnType }, + { "returnVariable", + startLowerCase( stripPrefix( commandIt->second.params[nonConstPointerParamIndices[0]].name, "p" ) ) }, + { "vkCommand", commandIt->first } } ); + + return std::make_pair( declaration, definition ); +} + +std::string VulkanHppGenerator::constructRAIIHandleSingularConstructorArguments( + std::pair const & handle, + std::map::const_iterator constructorIt ) const +{ + std::string arguments = startLowerCase( stripPrefix( handle.first, "Vk" ) ); + if ( handle.second.destructorIt != m_commands.end() ) + { + for ( auto const & destructorParam : handle.second.destructorIt->second.params ) + { + if ( ( destructorParam.type.type != handle.first ) && + ( std::find_if( handle.second.destructorIt->second.params.begin(), + handle.second.destructorIt->second.params.end(), + [&destructorParam]( ParamData const & pd ) { return pd.len == destructorParam.name; } ) == + handle.second.destructorIt->second.params.end() ) ) + { + if ( std::find_if( constructorIt->second.params.begin(), + constructorIt->second.params.end(), + [&destructorParam]( ParamData const & pd ) { + return pd.type.type == destructorParam.type.type; + } ) != constructorIt->second.params.end() ) + { + if ( isHandleType( destructorParam.type.type ) ) + { + assert( destructorParam.type.isValue() ); + arguments += ", static_cast<" + destructorParam.type.type + ">( *" + destructorParam.name + " )"; + } + else + { + assert( destructorParam.type.type == "VkAllocationCallbacks" ); + arguments += + ", reinterpret_cast( static_cast( allocator ) )"; + } + } + else + { +#if !defined( NDEBUG ) + bool found = false; +#endif + for ( auto const & constructorParam : constructorIt->second.params ) + { + auto structureIt = m_structures.find( constructorParam.type.type ); + if ( structureIt != m_structures.end() ) + { + auto memberIt = 
std::find_if( + structureIt->second.members.begin(), + structureIt->second.members.end(), + [&destructorParam]( MemberData const & md ) { return md.type.type == destructorParam.type.type; } ); + if ( memberIt != structureIt->second.members.end() ) + { +#if !defined( NDEBUG ) + found = true; +#endif + assert( !constructorParam.type.isValue() ); + std::string argument = + startLowerCase( stripPrefix( constructorParam.name, "p" ) ) + "." + memberIt->name; + if ( isHandleType( memberIt->type.type ) ) + { + argument = "static_cast<" + memberIt->type.type + ">( " + argument + " )"; + } + arguments += ", " + argument; + break; + } + } + } + assert( found ); + } + } + } + } + return arguments; +} + +std::string VulkanHppGenerator::constructRAIIHandleVectorSizeCheck( + std::string const & name, + CommandData const & commandData, + size_t initialSkipCount, + std::map> const & countToVectorMap, + std::set const & skippedParams ) const +{ + std::string const throwTemplate = + R"#( if ( ${zeroSizeCheck}${firstVectorName}.size() != ${secondVectorName}.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::${className}::${commandName}: ${firstVectorName}.size() != ${secondVectorName}.size()" ); + })#"; + + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); + + std::string sizeChecks; + for ( auto const & cvm : countToVectorMap ) + { + assert( !commandData.params[cvm.second[0]].optional ); + + size_t defaultStartIndex = determineDefaultStartIndex( commandData.params, skippedParams ); + std::string firstVectorName = startLowerCase( stripPrefix( commandData.params[cvm.second[0]].name, "p" ) ); + + for ( size_t i = 1; i < cvm.second.size(); i++ ) + { + std::string secondVectorName = startLowerCase( stripPrefix( commandData.params[cvm.second[i]].name, "p" ) ); + bool withZeroSizeCheck = commandData.params[cvm.second[i]].optional && ( defaultStartIndex <= cvm.second[i] ); + sizeChecks += + replaceWithMap( 
throwTemplate, + { { "firstVectorName", firstVectorName }, + { "className", stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) }, + { "commandName", commandName }, + { "secondVectorName", secondVectorName }, + { "zeroSizeCheck", withZeroSizeCheck ? ( "!" + secondVectorName + ".empty() && " ) : "" } } ); + if ( i + 1 < cvm.second.size() ) + { + sizeChecks += "\n"; + } + } + } + if ( !sizeChecks.empty() ) + { + sizeChecks += "\n"; + } + + return sizeChecks; +} + +std::string + VulkanHppGenerator::constructRAIIHandleUpgradeConstructor( std::pair const & handle ) const +{ + std::string typeName = startLowerCase( stripPrefix( handle.first, "Vk" ) ); + std::string constructorArguments = handle.first + " " + typeName; + std::string constructorInitializationList = "m_" + typeName + "( " + typeName + " )"; + if ( handle.second.destructorIt != m_commands.end() ) + { + for ( auto const & destructorParam : handle.second.destructorIt->second.params ) + { + if ( ( destructorParam.type.type != handle.first ) && + ( std::find_if( handle.second.destructorIt->second.params.begin(), + handle.second.destructorIt->second.params.end(), + [&destructorParam]( ParamData const & pd ) { return pd.len == destructorParam.name; } ) == + handle.second.destructorIt->second.params.end() ) ) + { + if ( isHandleType( destructorParam.type.type ) ) + { + assert( destructorParam.type.isValue() ); + std::string name = destructorParam.name; + constructorArguments += ", " + destructorParam.type.type + " " + name; + constructorInitializationList += ", m_" + name + "( " + name + " )"; + } + else + { + assert( destructorParam.type.type == "VkAllocationCallbacks" ); + constructorArguments += ", VkAllocationCallbacks const * allocator"; + constructorInitializationList += ", m_allocator( allocator )"; + } + } + } + } + if ( isMultiSuccessCodeConstructor( handle.second.constructorIts ) ) + { + constructorArguments += ", VULKAN_HPP_NAMESPACE::Result successCode"; + 
constructorInitializationList += ", m_constructorSuccessCode( successCode )"; + } + + const std::string protectedConstructorTemplate = R"( + ${handleType}( ${arguments}, VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : ${initializationList}, m_dispatcher( dispatcher ) + {})"; + + return replaceWithMap( protectedConstructorTemplate, + { { "arguments", constructorArguments }, + { "handleType", stripPrefix( handle.first, "Vk" ) }, + { "initializationList", constructorInitializationList } } ); +} + std::string VulkanHppGenerator::constructReturnType( CommandData const & commandData, std::string const & baseType ) const { @@ -5653,6 +9034,7 @@ std::string VulkanHppGenerator::constructSuccessCodeList( std::vector> const & countToVectorMap, std::set const & skippedParams ) const { @@ -5666,7 +9048,7 @@ std::string throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::${className}::${commandName}: ${firstVectorName}.size() != ${secondVectorName}.size()" ); })#"; - std::string commandName = determineCommandName( name, commandData.params[0].type.type ); + std::string commandName = determineCommandName( name, commandData.params[initialSkipCount - 1].type.type, m_tags ); std::string assertions, throws; for ( auto const & cvm : countToVectorMap ) @@ -5688,7 +9070,7 @@ std::string throws += replaceWithMap( throwTemplate, { { "firstVectorName", firstVectorName }, - { "className", stripPrefix( commandData.handle, "Vk" ) }, + { "className", stripPrefix( commandData.params[initialSkipCount - 1].type.type, "Vk" ) }, { "commandName", commandName }, { "secondVectorName", secondVectorName }, { "zeroSizeCheck", withZeroSizeCheck ? ( "!" 
+ secondVectorName + ".empty() && " ) : "" } } ); @@ -6589,7 +9971,7 @@ void VulkanHppGenerator::EnumData::addEnumValue( int line, } ); if ( it == values.end() ) { - values.push_back( EnumValueData( line, valueName, translatedName, extension, bitpos ) ); + values.emplace_back( line, valueName, translatedName, extension, bitpos ); } else { @@ -6928,6 +10310,96 @@ std::string VulkanHppGenerator::determineEnhancedReturnType( CommandData const & ",Allocator>"; // for the other parameters, we use a vector of the pure type } +std::vector::const_iterator> + VulkanHppGenerator::determineRAIIHandleConstructors( + std::string const & handleType, std::map::const_iterator destructorIt ) const +{ + std::vector::const_iterator> constructorIts; + for ( auto commandIt = m_commands.begin(); commandIt != m_commands.end(); ) + { + commandIt = + std::find_if( commandIt, m_commands.end(), [&handleType]( std::pair const & cd ) { + return std::find_if( cd.second.params.begin(), cd.second.params.end(), [&handleType]( ParamData const & pd ) { + return ( pd.type.type == handleType ) && pd.type.isNonConstPointer(); + } ) != cd.second.params.end(); + } ); + if ( commandIt != m_commands.end() ) + { + // only commands that provide all information needed for the destructor can be considered a constructor! 
+ bool valid = true; + if ( destructorIt != m_commands.end() ) + { + auto paramIt = std::find_if( destructorIt->second.params.begin(), + destructorIt->second.params.end(), + [&handleType]( ParamData const & pd ) { return pd.type.type == handleType; } ); + assert( paramIt != destructorIt->second.params.end() ); + for ( auto pit = destructorIt->second.params.begin(); valid && pit != destructorIt->second.params.end(); ++pit ) + { + valid = ( pit->name == paramIt->len ) || + ( std::find_if( + commandIt->second.params.begin(), commandIt->second.params.end(), [&pit]( ParamData const & pd ) { + return pd.type.type == pit->type.type; + } ) != commandIt->second.params.end() ); + for ( auto cit = commandIt->second.params.begin(); !valid && cit != commandIt->second.params.end(); ++cit ) + { + auto structureIt = m_structures.find( cit->type.type ); + if ( structureIt != m_structures.end() ) + { + valid = std::find_if( structureIt->second.members.begin(), + structureIt->second.members.end(), + [&pit]( MemberData const & md ) { return md.type.type == pit->type.type; } ) != + structureIt->second.members.end(); + } + } + } + } + if ( valid ) + { + constructorIts.push_back( commandIt ); + } + ++commandIt; + } + } + assert( !constructorIts.empty() ); + return constructorIts; +} + +std::map::const_iterator + VulkanHppGenerator::determineRAIIHandleDestructor( std::string const & handleType ) const +{ + std::string type = stripPrefix( handleType, "Vk" ); + auto destructorIt = m_commands.find( "vkDestroy" + type ); + if ( destructorIt == m_commands.end() ) + { + destructorIt = m_commands.find( "vkFree" + type + "s" ); + if ( destructorIt == m_commands.end() ) + { + destructorIt = m_commands.find( "vkRelease" + type ); + if ( destructorIt == m_commands.end() ) + { + if ( handleType == "VkDeviceMemory" ) + { + // special handling for vkDeviceMemory + destructorIt = m_commands.find( "vkFreeMemory" ); + assert( destructorIt != m_commands.end() ); + } + else if ( handleType == "VkDisplayKHR" 
) + { + // special handling for VkDisplayKHR + destructorIt = m_commands.find( "vkReleaseDisplayEXT" ); + assert( destructorIt != m_commands.end() ); + } + else + { + assert( ( handleType == "VkDisplayModeKHR" ) || ( handleType == "VkPhysicalDevice" ) || + ( handleType == "VkQueue" ) ); + } + } + } + } + return destructorIt; +} + size_t VulkanHppGenerator::determineReturnParamIndex( CommandData const & commandData, std::map const & vectorParamIndices, bool twoStep ) const @@ -6972,29 +10444,34 @@ size_t VulkanHppGenerator::determineReturnParamIndex( CommandData const & return returnParamIndex; } -std::set VulkanHppGenerator::determineSkippedParams( std::string const & handleType, - std::vector const & params, +std::set VulkanHppGenerator::determineSkippedParams( std::vector const & params, + size_t initialSkipCount, std::map const & vectorParamIndices, std::vector const & returnParamIndices, bool singular ) const { - std::set skippedParams = { returnParamIndices.begin(), returnParamIndices.end() }; - - if ( !handleType.empty() ) + assert( initialSkipCount <= params.size() ); + std::set skippedParams; + for ( size_t i = 0; i < initialSkipCount; ++i ) { - skippedParams.insert( 0 ); + skippedParams.insert( i ); } for ( auto const & vpi : vectorParamIndices ) { - if ( ( std::find_if( returnParamIndices.begin(), - returnParamIndices.end(), - [&vpi]( size_t rpi ) { return vpi.first == rpi; } ) == returnParamIndices.end() ) || + assert( !params[vpi.first].len.empty() ); + if ( ( ( std::find_if( returnParamIndices.begin(), + returnParamIndices.end(), + [&vpi]( size_t rpi ) { return vpi.first == rpi; } ) == returnParamIndices.end() ) && + isParam( params[vpi.first].len, params ) ) || ( singular && params[vpi.second].type.isValue() ) ) { skippedParams.insert( vpi.second ); } } + + skippedParams.insert( returnParamIndices.begin(), returnParamIndices.end() ); + return skippedParams; } @@ -7044,8 +10521,8 @@ std::vector for ( size_t i = 0; i < params.size(); i++ ) { - // very 
special handling of parameters of some types, which always come as a non-const pointer but are not meant to - // be a potential return value! + // very special handling of parameters of some types, which always come as a non-const pointer but are not meant + // to be a potential return value! if ( params[i].type.isNonConstPointer() && ( specialPointerTypes.find( params[i].type.type ) == specialPointerTypes.end() ) ) { @@ -7151,6 +10628,48 @@ void VulkanHppGenerator::appendIndexTypeTraits( std::string & str ) const } } +void VulkanHppGenerator::distributeSecondLevelCommands( std::set const & specialFunctions ) +{ + for ( auto & handle : m_handles ) + { + if ( !handle.first.empty() ) + { + for ( auto command = handle.second.commands.begin(); command != handle.second.commands.end(); ) + { + bool foundCommand = false; + if ( specialFunctions.find( *command ) == specialFunctions.end() ) + { + auto commandIt = m_commands.find( *command ); + assert( commandIt != m_commands.end() ); + assert( commandIt->second.params.front().type.type == handle.first ); + if ( ( 1 < commandIt->second.params.size() ) && ( isHandleType( commandIt->second.params[1].type.type ) ) && + !commandIt->second.params[1].optional ) + { + auto handleIt = m_handles.find( commandIt->second.params[1].type.type ); + assert( handleIt != m_handles.end() ); + assert( !handleIt->second.constructorIts.empty() ); + if ( ( *handleIt->second.constructorIts.begin() )->second.handle == handle.first ) + { + assert( std::find_if( handleIt->second.constructorIts.begin(), + handleIt->second.constructorIts.end(), + [&handle]( auto const & constructorIt ) { + return constructorIt->second.handle != handle.first; + } ) == handleIt->second.constructorIts.end() ); + handleIt->second.secondLevelCommands.insert( *command ); + command = handle.second.commands.erase( command ); + foundCommand = true; + } + } + } + if ( !foundCommand ) + { + ++command; + } + } + } + } +} + std::string const & VulkanHppGenerator::getTypesafeCheck() 
const { return m_typesafeCheck; @@ -7442,6 +10961,28 @@ bool VulkanHppGenerator::isLenByStructMember( std::string const & name, ParamDat return false; } +bool VulkanHppGenerator::isMultiSuccessCodeConstructor( + std::vector::const_iterator> const & constructorIts ) const +{ + bool ok = !constructorIts.empty(); + if ( ok ) + { + auto constructorIt = constructorIts.begin(); + ok = ( 2 < ( *constructorIt )->second.successCodes.size() ) || + ( ( ( *constructorIt )->second.successCodes.size() == 2 ) && + ( ( *constructorIt )->second.successCodes[1] != "VK_INCOMPLETE" ) ); +#if !defined( NDEBUG ) + for ( constructorIt = std::next( constructorIt ); constructorIt != constructorIts.end(); ++constructorIt ) + { + assert( ok == ( 2 < ( *constructorIt )->second.successCodes.size() ) || + ( ( ( *constructorIt )->second.successCodes.size() == 2 ) && + ( ( *constructorIt )->second.successCodes[1] != "VK_INCOMPLETE" ) ) ); + } +#endif + } + return ok; +} + bool VulkanHppGenerator::isParam( std::string const & name, std::vector const & params ) const { return std::find_if( params.begin(), params.end(), [&name]( ParamData const & pd ) { return pd.name == name; } ) != @@ -9719,6 +13260,52 @@ void VulkanHppGenerator::registerDeleter( std::string const & } } +void VulkanHppGenerator::renameFunctionParameters() +{ + // we rename a couple of function parameters to prevent this warning, treated as an error: + // warning C4458: declaration of 'objectType' hides class member + for ( auto & command : m_commands ) + { + for ( auto & param : command.second.params ) + { + if ( param.name == "objectType" ) + { + param.name += "_"; + } + } + } +} + +void VulkanHppGenerator::rescheduleRAIIHandle( std::string & str, + std::string & commandDefinitions, + std::pair const & handle, + std::set & listedHandles, + std::set const & specialFunctions ) const +{ + listedHandles.insert( handle.first ); + for ( auto const & parent : handle.second.parents ) + { + if ( listedHandles.find( parent ) == 
listedHandles.end() ) + { + auto parentIt = m_handles.find( parent ); + assert( parentIt != m_handles.end() ); + appendRAIIHandle( str, commandDefinitions, *parentIt, listedHandles, specialFunctions ); + } + } + + for ( auto constructorIt : handle.second.constructorIts ) + { + for ( auto const & param : constructorIt->second.params ) + { + auto handleIt = m_handles.find( param.type.type ); + if ( handleIt != m_handles.end() && ( listedHandles.find( param.type.type ) == listedHandles.end() ) ) + { + appendRAIIHandle( str, commandDefinitions, *handleIt, listedHandles, specialFunctions ); + } + } + } +} + void VulkanHppGenerator::setVulkanLicenseHeader( int line, std::string const & comment ) { check( m_vulkanLicenseHeader.empty(), line, "second encounter of a Copyright comment" ); @@ -10484,7 +14071,7 @@ int main( int argc, char ** argv ) template class ObjectDestroy { - public: + public: ObjectDestroy() = default; ObjectDestroy( OwnerType owner, @@ -10496,18 +14083,18 @@ int main( int argc, char ** argv ) , m_dispatch( &dispatch ) {} - OwnerType getOwner() const VULKAN_HPP_NOEXCEPT { return m_owner; } - Optional getAllocator() const VULKAN_HPP_NOEXCEPT { return m_allocationCallbacks; } + OwnerType getOwner() const VULKAN_HPP_NOEXCEPT { return m_owner; } + Optional getAllocator() const VULKAN_HPP_NOEXCEPT { return m_allocationCallbacks; } - protected: - template - void destroy(T t) VULKAN_HPP_NOEXCEPT - { - VULKAN_HPP_ASSERT( m_owner && m_dispatch ); - m_owner.destroy( t, m_allocationCallbacks, *m_dispatch ); - } + protected: + template + void destroy(T t) VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_owner && m_dispatch ); + m_owner.destroy( t, m_allocationCallbacks, *m_dispatch ); + } - private: + private: OwnerType m_owner = {}; Optional m_allocationCallbacks = nullptr; Dispatch const * m_dispatch = nullptr; @@ -10518,7 +14105,7 @@ int main( int argc, char ** argv ) template class ObjectDestroy { - public: + public: ObjectDestroy() = default; ObjectDestroy( 
Optional allocationCallbacks, @@ -10527,17 +14114,17 @@ int main( int argc, char ** argv ) , m_dispatch( &dispatch ) {} - Optional getAllocator() const VULKAN_HPP_NOEXCEPT { return m_allocationCallbacks; } + Optional getAllocator() const VULKAN_HPP_NOEXCEPT { return m_allocationCallbacks; } - protected: - template - void destroy(T t) VULKAN_HPP_NOEXCEPT - { - VULKAN_HPP_ASSERT( m_dispatch ); - t.destroy( m_allocationCallbacks, *m_dispatch ); - } + protected: + template + void destroy(T t) VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_dispatch ); + t.destroy( m_allocationCallbacks, *m_dispatch ); + } - private: + private: Optional m_allocationCallbacks = nullptr; Dispatch const * m_dispatch = nullptr; }; @@ -11589,9 +15176,8 @@ namespace std tinyxml2::XMLDocument doc; std::string filename = ( argc == 1 ) ? VK_SPEC : argv[1]; - std::cout << "Loading vk.xml from " << filename << std::endl; - std::cout << "Writing vulkan.hpp to " << VULKAN_HPP_FILE << std::endl; + std::cout << "VulkanHppGenerator: Loading " << filename << std::endl; tinyxml2::XMLError error = doc.LoadFile( filename.c_str() ); if ( error != tinyxml2::XML_SUCCESS ) { @@ -11600,8 +15186,10 @@ namespace std return -1; } + std::cout << "VulkanHppGenerator: Parsing " << filename << std::endl; VulkanHppGenerator generator( doc ); + std::cout << "VulkanHppGenerator: Generating " << VULKAN_HPP_FILE << std::endl; std::string str; static const size_t estimatedLength = 4 * 1024 * 1024; str.reserve( estimatedLength ); @@ -11628,9 +15216,15 @@ namespace std generator.appendHandlesCommandDefinitions( str ); generator.appendStructureChainValidation( str ); generator.appendDispatchLoaderDynamic( str ); - str += "} // namespace VULKAN_HPP_NAMESPACE\n"; + str += + "} // namespace VULKAN_HPP_NAMESPACE\n" + "\n" + "namespace std\n" + "{\n"; generator.appendHashStructures( str ); - str += "#endif\n"; + str += + "} // namespace std\n" + "#endif\n"; std::ofstream ofs( VULKAN_HPP_FILE ); assert( !ofs.fail() ); @@ -11638,16 
+15232,77 @@ namespace std ofs.close(); #if defined( CLANG_FORMAT_EXECUTABLE ) - std::cout << "VulkanHppGenerator: formatting vulkan.hpp using clang-format..."; + std::cout << "VulkanHppGenerator: Formatting " << VULKAN_HPP_FILE << " using clang-format..." << std::endl; int ret = std::system( "\"" CLANG_FORMAT_EXECUTABLE "\" -i --style=file " VULKAN_HPP_FILE ); if ( ret != 0 ) { - std::cout << "VulkanHppGenerator: failed to format file " << filename << " with error <" << ret << ">\n"; + std::cout << "VulkanHppGenerator: failed to format file " << VULKAN_HPP_FILE << " with error <" << ret << ">\n"; + return -1; + } +#endif + + std::cout << "VulkanHppGenerator: Generating " << VULKAN_RAII_HPP_FILE << std::endl; + str.clear(); + str = generator.getVulkanLicenseHeader() + R"( +#ifndef VULKAN_RAII_HPP +# define VULKAN_RAII_HPP + +#include + +#if !defined( VULKAN_HPP_RAII_NAMESPACE ) +# define VULKAN_HPP_RAII_NAMESPACE raii +#endif + +#if !defined( VULKAN_HPP_RAII_DISPATCHER_TYPE ) +# define VULKAN_HPP_RAII_DISPATCHER_TYPE VULKAN_HPP_DEFAULT_DISPATCHER_TYPE +#endif + +namespace VULKAN_HPP_NAMESPACE +{ + namespace VULKAN_HPP_RAII_NAMESPACE + { +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) && !defined(VULKAN_HPP_NO_EXCEPTIONS) + + template + VULKAN_HPP_CONSTEXPR_14 T exchange( T & obj, U && newValue ) + { +# if ( 14 <= VULKAN_HPP_CPP_VERSION ) + return std::exchange( obj, std::forward( newValue ) ); +# else + T oldValue = std::move( obj ); + obj = std::forward( newValue ); + return oldValue; +# endif + } + +)"; + std::string raiiHandlesCommandDefinitions; + generator.appendRAIIHandles( str, raiiHandlesCommandDefinitions ); + str += raiiHandlesCommandDefinitions; + str += R"( +#endif + } // namespace VULKAN_HPP_RAII_NAMESPACE +} // namespace VULKAN_HPP_NAMESPACE +#endif +)"; + + ofs.open( VULKAN_RAII_HPP_FILE ); + assert( !ofs.fail() ); + ofs << str; + ofs.close(); + +#if defined( CLANG_FORMAT_EXECUTABLE ) + std::cout << "VulkanHppGenerator: Formatting " << 
VULKAN_RAII_HPP_FILE << " using clang-format..." << std::endl; + ret = std::system( "\"" CLANG_FORMAT_EXECUTABLE "\" -i --style=file " VULKAN_RAII_HPP_FILE ); + if ( ret != 0 ) + { + std::cout << "VulkanHppGenerator: failed to format file " << VULKAN_RAII_HPP_FILE << " with error <" << ret + << ">\n"; return -1; } #else std::cout - << "VulkanHppGenerator: could not find clang-format. The generated vulkan.hpp will not be formatted accordingly.\n"; + << "VulkanHppGenerator: could not find clang-format. The generated files will not be formatted accordingly.\n"; #endif } catch ( std::exception const & e ) diff --git a/VulkanHppGenerator.hpp b/VulkanHppGenerator.hpp index 8f9631e..0fa0141 100644 --- a/VulkanHppGenerator.hpp +++ b/VulkanHppGenerator.hpp @@ -35,6 +35,7 @@ public: void appendHandles( std::string & str ); void appendHandlesCommandDefinitions( std::string & str ) const; void appendHashStructures( std::string & str ) const; + void appendRAIIHandles( std::string & str, std::string & commandDefinitions ); void appendResultExceptions( std::string & str ) const; void appendStructs( std::string & str ); void appendStructureChainValidation( std::string & str ); @@ -211,7 +212,12 @@ private: std::string deletePool; std::string objTypeEnum; std::vector parents; + std::set secondLevelCommands; int xmlLine; + + // RAII data + std::map::const_iterator destructorIt; + std::vector::const_iterator> constructorIts; }; struct MemberData @@ -316,26 +322,31 @@ private: void appendCommand( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const; void appendCommandChained( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t nonConstPointerIndex ) const; void appendCommandSingular( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool 
definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; void appendCommandStandard( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const; void appendCommandStandardAndEnhanced( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & nonConstPointerParamIndices ) const; @@ -343,45 +354,53 @@ private: appendCommandStandardEnhancedDeprecatedAllocator( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & nonConstPointerParamIndices ) const; void appendCommandStandardOrEnhanced( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition ) const; void appendCommandUnique( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t nonConstPointerIndex, bool definition ) const; void appendCommandVector( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices ) const; void appendCommandVectorChained( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & returnParamIndices ) const; void appendCommandVectorDeprecated( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, std::map const & vectorParamIndices, std::vector const & returnParamIndices, bool definition ) const; void appendCommandVectorSingularUnique( std::string & str, std::string const & name, CommandData const & commandData, + size_t 
initialSkipCount, std::map const & vectorParamIndices, size_t returnParamIndex, bool definition ) const; void appendCommandVectorUnique( std::string & str, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, std::map const & vectorParamIndices, size_t returnParamIndex, bool definition ) const; @@ -408,6 +427,7 @@ private: std::string const & indentation, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t returnParamIndex, std::map const & vectorParamIndices ) const; void appendFunctionBodyEnhancedReturnResultValue( std::string & str, @@ -415,6 +435,7 @@ private: std::string const & returnName, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t returnParamIndex, bool twoStep ) const; void appendFunctionBodyEnhancedTwoStep( std::string & str, @@ -438,6 +459,15 @@ private: bool hasSizeParam, bool isTemplateParam ) const; void appendHandle( std::string & str, std::pair const & handle ); + void appendRAIIHandle( std::string & str, + std::string & commandDefinitions, + std::pair const & handle, + std::set & listedHandles, + std::set const & specialFunctions ) const; + void appendRAIIHandleContext( std::string & str, + std::string & commandDefinitions, + std::pair const & handle, + std::set const & specialFunctions ) const; void appendStruct( std::string & str, std::pair const & structure ); void appendStructAssignmentOperators( std::string & str, std::pair const & structure, @@ -469,99 +499,127 @@ private: void appendUniqueTypes( std::string & str, std::string const & parentType, std::set const & childrenTypes ) const; + bool checkEquivalentSingularConstructor( + std::vector::const_iterator> const & constructorIts, + std::map::const_iterator constructorIt, + std::vector::const_iterator lenIt ) const; std::string constructArgumentListEnhanced( std::vector const & params, std::set const & skippedParams, size_t singularParam, bool definition, bool 
withAllocators, - bool structureChain ) const; + bool structureChain, + bool withDispatcher ) const; std::string constructArgumentListStandard( std::vector const & params, std::set const & skippedParams ) const; - std::string constructCallArgumentsEnhanced( std::string const & handle, - std::vector const & params, + std::string constructCallArgumentEnhanced( ParamData const & param, + std::vector const & params, + bool nonConstPointerAsNullptr, + size_t singularParamIndex, + bool raiiHandleMemberFunction ) const; + std::string constructCallArgumentsEnhanced( std::vector const & params, + size_t initialSkipCount, bool nonConstPointerAsNullptr, - size_t singularParamIndex ) const; + size_t singularParamIndex, + bool raiiHandleMemberFunction ) const; std::string constructCallArgumentsStandard( std::string const & handle, std::vector const & params ) const; std::string constructCommandResult( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices ) const; std::string constructCommandResultEnumerate( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndices, bool withAllocators ) const; std::string constructCommandResultEnumerateChained( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices, bool withAllocator ) const; std::string constructCommandResultEnumerateTwoVectors( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & returnParamIndices, bool withAllocators ) const; std::string constructCommandResultEnumerateTwoVectorsDeprecated( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, bool 
withAllocators ) const; std::string constructCommandResultGetChain( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const; std::string constructCommandResultGetHandleUnique( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const; std::string constructCommandResultGetTwoValues( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::vector const & nonConstPointerParamIndices ) const; std::string constructCommandResultGetTwoVectors( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices ) const; std::string constructCommandResultGetValue( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const; std::string constructCommandResultGetValueDeprecated( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; std::string constructCommandResultGetVector( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; std::string constructCommandResultGetVectorAndValue( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, std::vector const & returnParamIndex, bool withAllocator ) const; std::string constructCommandResultGetVectorDeprecated( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; std::string constructCommandResultGetVectorOfHandles( std::string const & name, 
CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex, bool withAllocator ) const; std::string constructCommandResultGetVectorOfHandlesSingular( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; std::string constructCommandResultGetVectorOfHandlesUnique( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex, @@ -569,47 +627,61 @@ private: std::string constructCommandResultGetVectorOfHandlesUniqueSingular( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; std::string constructCommandResultGetVectorSingular( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; - std::string - constructCommandStandard( std::string const & name, CommandData const & commandData, bool definition ) const; - std::string constructCommandType( std::string const & name, CommandData const & commandData, bool definition ) const; + std::string constructCommandStandard( std::string const & name, + CommandData const & commandData, + size_t initialSkipCount, + bool definition ) const; + std::string constructCommandType( std::string const & name, + CommandData const & commandData, + size_t initialSkipCount, + bool definition ) const; std::string constructCommandVoid( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices ) const; std::string constructCommandVoidEnumerate( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool 
definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices, bool withAllocators ) const; std::string constructCommandVoidEnumerateChained( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::pair const & vectorParamIndex, std::vector const & returnParamIndices, bool withAllocators ) const; std::string constructCommandVoidGetChain( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, size_t nonConstPointerIndex ) const; std::string constructCommandVoidGetValue( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, bool definition, std::map const & vectorParamIndices, size_t returnParamIndex ) const; std::string constructConstexprString( std::pair const & structData, bool assignmentOperator ) const; + std::string constructFailureCheck( std::vector const & successCodes ) const; std::string constructFunctionBodyEnhanced( std::string const & indentation, std::string const & name, CommandData const & commandData, + size_t initialSkipCount, size_t returnParamIndex, size_t templateParamIndex, std::map const & vectorParamIndices, @@ -629,11 +701,187 @@ private: bool withDefaults, bool withAllocator ) const; std::string constructNoDiscardStandard( CommandData const & commandData ) const; + std::pair constructRAIIHandleConstructor( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::string const & enter, + std::string const & leave ) const; + std::string constructRAIIHandleConstructorArguments( std::string const & handleType, + std::vector const & params, + bool singular, + bool encounteredArgument ) const; + std::string constructRAIIHandleConstructorCallArguments( std::string const & handleType, + std::vector const & params, + bool nonConstPointerAsNullptr, + size_t singularParamIndex, + bool allocatorIsMemberVariable ) const; + std::string constructRAIIHandleConstructorEnumerate( + 
std::pair const & handle, + std::map::const_iterator constructorIt, + std::vector::const_iterator handleParamIt, + std::vector::const_iterator lenParamIt, + std::string const & enter, + std::string const & leave ) const; + std::string constructRAIIHandleConstructorInitializationList( + std::string const & handleType, + std::map::const_iterator constructorIt, + std::map::const_iterator destructorIt, + bool hasSecondLevelCommands ) const; + std::string constructRAIIHandleConstructorResult( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::string const & enter, + std::string const & leave ) const; + std::string constructRAIIHandleConstructorTakeOwnership( std::pair const & handle ) const; + std::pair + constructRAIIHandleConstructors( std::pair const & handle ) const; + std::string constructRAIIHandleConstructorVector( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::vector::const_iterator handleParamIt, + std::string const & enter, + std::string const & leave ) const; + std::string constructRAIIHandleConstructorVectorSingular( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::vector::const_iterator handleParamIt, + std::string const & enter, + std::string const & leave ) const; + std::string constructRAIIHandleConstructorVoid( + std::pair const & handle, + std::map::const_iterator constructorIt, + std::string const & enter, + std::string const & leave ) const; + std::pair + constructRAIIHandleDestructor( std::string const & handleType, + std::map::const_iterator destructorIt, + std::string const & enter ) const; + std::string constructRAIIHandleDestructorCallArguments( std::string const & handleType, + std::vector const & params ) const; + std::tuple + constructRAIIHandleDetails( std::pair const & handle, + std::string const & destructorCall ) const; + std::pair + constructRAIIHandleMemberFunctionResultEnumerate( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map 
const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultEnumerateTwoVectors( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionResultMulti( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultMultiGetValue( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultMultiGetVectorOfVoid( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultMultiGetVectorOfVoidSingular( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionResultMultiNoErrors( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionResultSingle( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultSingleGetChain( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultSingleGetVectorAndValue( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & 
nonConstPointerParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultSingleGetVectorOfVoid( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultSingleGetVectorOfVoidSingular( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionResultSingleNoErrors( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair constructRAIIHandleMemberFunctionResultSingleGetValue( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + void constructRAIIHandleMemberFunction( std::string & functionDeclarations, + std::string & functionDefinitions, + std::string const & command, + size_t initialSkipCount, + std::set const & specialFunctions ) const; + std::pair + constructRAIIHandleMemberFunctions( std::pair const & handle, + std::set const & specialFunctions ) const; + std::pair + constructRAIIHandleMemberFunctionType( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionVkType( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionVoid( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionVoidEnumerate( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair 
constructRAIIHandleMemberFunctionVoidEnumerateChain( + std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionVoidGetChain( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::pair + constructRAIIHandleMemberFunctionVoidGetValue( std::map::const_iterator commandIt, + size_t initialSkipCount, + std::map const & vectorParamIndices, + std::vector const & nonConstPointerParamIndices ) const; + std::string constructRAIIHandleSingularConstructorArguments( + std::pair const & handle, + std::map::const_iterator constructorIt ) const; + std::string constructRAIIHandleVectorSizeCheck( std::string const & name, + CommandData const & commandData, + size_t initialSkipCount, + std::map> const & countToVectorMap, + std::set const & skippedParams ) const; + std::string constructRAIIHandleUpgradeConstructor( std::pair const & handle ) const; std::string constructReturnType( CommandData const & commandData, std::string const & baseType ) const; std::string constructSuccessCheck( std::vector const & successCodes ) const; std::string constructSuccessCodeList( std::vector const & successCodes ) const; std::string constructVectorSizeCheck( std::string const & name, CommandData const & commandData, + size_t initialSkipCount, std::map> const & countToVectorMap, std::set const & skippedParams ) const; void checkCorrectness(); @@ -644,11 +892,16 @@ private: std::string determineEnhancedReturnType( CommandData const & commandData, size_t returnParamIndex, bool isStructureChain ) const; - size_t determineReturnParamIndex( CommandData const & commandData, - std::map const & vectorParamIndices, - bool twoStep ) const; - std::set determineSkippedParams( std::string const & handleType, - std::vector const & params, + 
std::vector::const_iterator> + determineRAIIHandleConstructors( std::string const & handleType, + std::map::const_iterator destructorIt ) const; + std::map::const_iterator + determineRAIIHandleDestructor( std::string const & handleType ) const; + size_t determineReturnParamIndex( CommandData const & commandData, + std::map const & vectorParamIndices, + bool twoStep ) const; + std::set determineSkippedParams( std::vector const & params, + size_t initialSkipCount, std::map const & vectorParamIndices, std::vector const & returnParamIndex, bool singular ) const; @@ -656,6 +909,7 @@ private: std::vector determineConstPointerParamIndices( std::vector const & params ) const; std::vector determineNonConstPointerParamIndices( std::vector const & params ) const; std::map determineVectorParamIndicesNew( std::vector const & params ) const; + void distributeSecondLevelCommands( std::set const & specialFunctions ); std::string generateLenInitializer( std::vector::const_iterator mit, std::map::const_iterator, @@ -676,6 +930,8 @@ private: bool isHandleType( std::string const & type ) const; bool isLenByStructMember( std::string const & name, std::vector const & params ) const; bool isLenByStructMember( std::string const & name, ParamData const & param ) const; + bool isMultiSuccessCodeConstructor( + std::vector::const_iterator> const & constructorIts ) const; bool isParam( std::string const & name, std::vector const & params ) const; bool isStructureChainAnchor( std::string const & type ) const; bool needsComplexBody( CommandData const & commandData ) const; @@ -768,6 +1024,12 @@ private: void readTypeInclude( tinyxml2::XMLElement const * element, std::map const & attributes ); void readTypes( tinyxml2::XMLElement const * element ); void registerDeleter( std::string const & name, std::pair const & commandData ); + void renameFunctionParameters(); + void rescheduleRAIIHandle( std::string & str, + std::string & commandDefinitions, + std::pair const & handle, + std::set & 
listedHandles, + std::set const & specialFunctions ) const; void setVulkanLicenseHeader( int line, std::string const & comment ); std::string toString( TypeCategory category ); diff --git a/samples/01_InitInstance/01_InitInstance.cpp b/samples/01_InitInstance/01_InitInstance.cpp index 38e5802..0518b01 100644 --- a/samples/01_InitInstance/01_InitInstance.cpp +++ b/samples/01_InitInstance/01_InitInstance.cpp @@ -13,14 +13,14 @@ // limitations under the License. // // VulkanHpp Samples : 01_InitInstance -// Create and destroy a vk::UniqueInstance +// Create and destroy a vk::Instance #include "vulkan/vulkan.hpp" #include -static char const * AppName = "01_InitInstance"; -static char const * EngineName = "Vulkan.hpp"; +static std::string AppName = "01_InitInstance"; +static std::string EngineName = "Vulkan.hpp"; int main( int /*argc*/, char ** /*argv*/ ) { @@ -29,16 +29,16 @@ int main( int /*argc*/, char ** /*argv*/ ) try { // initialize the vk::ApplicationInfo structure - vk::ApplicationInfo applicationInfo( AppName, 1, EngineName, 1, VK_API_VERSION_1_1 ); + vk::ApplicationInfo applicationInfo( AppName.c_str(), 1, EngineName.c_str(), 1, VK_API_VERSION_1_1 ); // initialize the vk::InstanceCreateInfo vk::InstanceCreateInfo instanceCreateInfo( {}, &applicationInfo ); - // create a UniqueInstance - vk::UniqueInstance instance = vk::createInstanceUnique( instanceCreateInfo ); + // create an Instance + vk::Instance instance = vk::createInstance( instanceCreateInfo ); - // Note: No need to explicitly destroy the instance, as the corresponding destroy function is - // called by the destructor of the UniqueInstance on leaving this scope. 
+ // destroy it again + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/02_EnumerateDevices/02_EnumerateDevices.cpp b/samples/02_EnumerateDevices/02_EnumerateDevices.cpp index ac201d8..f9aef8a 100644 --- a/samples/02_EnumerateDevices/02_EnumerateDevices.cpp +++ b/samples/02_EnumerateDevices/02_EnumerateDevices.cpp @@ -30,27 +30,27 @@ #include -static char const * AppName = "02_EnumerateDevices"; -static char const * EngineName = "Vulkan.hpp"; +static std::string AppName = "02_EnumerateDevices"; +static std::string EngineName = "Vulkan.hpp"; int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessengerEXT( instance ); #endif /* VULKAN_HPP_KEY_START */ // enumerate the physicalDevices - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); - - // Note: PhysicalDevices are not created, but just enumerated. Therefore, there is nothing like a - // UniquePhysicalDevice. A PhysicalDevice is unique by definition, and there's no need to destroy it. 
+ vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); /* VULKAN_HPP_KEY_END */ + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/03_InitDevice/03_InitDevice.cpp b/samples/03_InitDevice/03_InitDevice.cpp index 2cffe79..1499505 100644 --- a/samples/03_InitDevice/03_InitDevice.cpp +++ b/samples/03_InitDevice/03_InitDevice.cpp @@ -27,12 +27,13 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); /* VULKAN_HPP_KEY_START */ @@ -40,25 +41,26 @@ int main( int /*argc*/, char ** /*argv*/ ) std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); // get the first index into queueFamiliyProperties which supports graphics - size_t graphicsQueueFamilyIndex = std::distance( - queueFamilyProperties.begin(), - std::find_if( - queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { - return qfp.queueFlags & vk::QueueFlagBits::eGraphics; - } ) ); + auto propertyIterator = std::find_if( + queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { + return qfp.queueFlags & vk::QueueFlagBits::eGraphics; + } ); + size_t graphicsQueueFamilyIndex = std::distance( queueFamilyProperties.begin(), propertyIterator ); assert( graphicsQueueFamilyIndex < 
queueFamilyProperties.size() ); - // create a UniqueDevice + // create a Device float queuePriority = 0.0f; vk::DeviceQueueCreateInfo deviceQueueCreateInfo( vk::DeviceQueueCreateFlags(), static_cast( graphicsQueueFamilyIndex ), 1, &queuePriority ); - vk::UniqueDevice device = - physicalDevice.createDeviceUnique( vk::DeviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ) ); + vk::Device device = + physicalDevice.createDevice( vk::DeviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ) ); - // Note: No need to explicitly destroy the device, as the corresponding destroy function is - // called by the destructor of the UniqueDevice on leaving this scope. + // destroy the device + device.destroy(); /* VULKAN_HPP_KEY_END */ + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp b/samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp index 3ca7d02..08121ef 100644 --- a/samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp +++ b/samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp @@ -27,34 +27,41 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); - vk::UniqueDevice device = vk::su::createDevice( physicalDevice, 
graphicsQueueFamilyIndex ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); /* VULKAN_HPP_KEY_START */ - // create a UniqueCommandPool to allocate a CommandBuffer from - vk::UniqueCommandPool commandPool = device->createCommandPoolUnique( - vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlags(), graphicsQueueFamilyIndex ) ); + // create a CommandPool to allocate a CommandBuffer from + vk::CommandPool commandPool = + device.createCommandPool( vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlags(), graphicsQueueFamilyIndex ) ); // allocate a CommandBuffer from the CommandPool - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - // Note: No need to explicitly free the CommandBuffer or destroy the CommandPool, as the corresponding free and - // destroy functions are called by the destructor of the UniqueCommandBuffer and the UniqueCommandPool on leaving - // this scope. + // freeing the commandBuffer is optional, as it will automatically freed when the corresponding CommandPool is + // destroyed. 
+ device.freeCommandBuffers( commandPool, commandBuffer ); + + // destroy the commandPool + device.destroyCommandPool( commandPool ); /* VULKAN_HPP_KEY_END */ + + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/05_InitSwapchain/05_InitSwapchain.cpp b/samples/05_InitSwapchain/05_InitSwapchain.cpp index 47125b7..66bb904 100644 --- a/samples/05_InitSwapchain/05_InitSwapchain.cpp +++ b/samples/05_InitSwapchain/05_InitSwapchain.cpp @@ -27,33 +27,33 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( queueFamilyProperties ); /* VULKAN_HPP_KEY_START */ - uint32_t width = 64; - uint32_t height = 64; - vk::su::WindowData window = vk::su::createWindow( AppName, { width, height } ); - vk::UniqueSurfaceKHR surface; + uint32_t width = 64; + uint32_t height = 64; + vk::su::WindowData window = vk::su::createWindow( AppName, { width, height } ); + vk::SurfaceKHR surface; { VkSurfaceKHR _surface; - glfwCreateWindowSurface( VkInstance( instance.get() ), window.handle, nullptr, &_surface ); - vk::ObjectDestroy _deleter( instance.get() ); - surface = 
vk::UniqueSurfaceKHR( vk::SurfaceKHR( _surface ), _deleter ); + glfwCreateWindowSurface( static_cast( instance ), window.handle, nullptr, &_surface ); + surface = vk::SurfaceKHR( _surface ); } // determine a queueFamilyIndex that suports present // first check if the graphicsQueueFamiliyIndex is good enough size_t presentQueueFamilyIndex = - physicalDevice.getSurfaceSupportKHR( static_cast( graphicsQueueFamilyIndex ), surface.get() ) + physicalDevice.getSurfaceSupportKHR( static_cast( graphicsQueueFamilyIndex ), surface ) ? graphicsQueueFamilyIndex : queueFamilyProperties.size(); if ( presentQueueFamilyIndex == queueFamilyProperties.size() ) @@ -63,7 +63,7 @@ int main( int /*argc*/, char ** /*argv*/ ) for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) { if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && - physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface.get() ) ) + physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface ) ) { graphicsQueueFamilyIndex = vk::su::checked_cast( i ); presentQueueFamilyIndex = i; @@ -76,7 +76,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // family index that supports present for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) { - if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface.get() ) ) + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface ) ) { presentQueueFamilyIndex = i; break; @@ -91,16 +91,15 @@ int main( int /*argc*/, char ** /*argv*/ ) } // create a device - vk::UniqueDevice device = - vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex, vk::su::getDeviceExtensions() ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex, vk::su::getDeviceExtensions() ); // get the supported VkFormats - std::vector formats = physicalDevice.getSurfaceFormatsKHR( surface.get() ); + std::vector formats = physicalDevice.getSurfaceFormatsKHR( surface ); assert( !formats.empty() ); vk::Format format = ( 
formats[0].format == vk::Format::eUndefined ) ? vk::Format::eB8G8R8A8Unorm : formats[0].format; - vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surface.get() ); + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surface ); VkExtent2D swapchainExtent; if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) { @@ -127,14 +126,14 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::CompositeAlphaFlagBitsKHR compositeAlpha = ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePreMultiplied ) ? vk::CompositeAlphaFlagBitsKHR::ePreMultiplied - : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) - ? vk::CompositeAlphaFlagBitsKHR::ePostMultiplied - : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) - ? vk::CompositeAlphaFlagBitsKHR::eInherit - : vk::CompositeAlphaFlagBitsKHR::eOpaque; + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) + ? vk::CompositeAlphaFlagBitsKHR::ePostMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) + ? 
vk::CompositeAlphaFlagBitsKHR::eInherit + : vk::CompositeAlphaFlagBitsKHR::eOpaque; vk::SwapchainCreateInfoKHR swapChainCreateInfo( vk::SwapchainCreateFlagsKHR(), - surface.get(), + surface, surfaceCapabilities.minImageCount, format, vk::ColorSpaceKHR::eSrgbNonlinear, @@ -161,11 +160,11 @@ int main( int /*argc*/, char ** /*argv*/ ) swapChainCreateInfo.pQueueFamilyIndices = queueFamilyIndices; } - vk::UniqueSwapchainKHR swapChain = device->createSwapchainKHRUnique( swapChainCreateInfo ); + vk::SwapchainKHR swapChain = device.createSwapchainKHR( swapChainCreateInfo ); - std::vector swapChainImages = device->getSwapchainImagesKHR( swapChain.get() ); + std::vector swapChainImages = device.getSwapchainImagesKHR( swapChain ); - std::vector imageViews; + std::vector imageViews; imageViews.reserve( swapChainImages.size() ); vk::ComponentMapping componentMapping( vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); @@ -174,13 +173,22 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::ImageViewCreateInfo imageViewCreateInfo( vk::ImageViewCreateFlags(), image, vk::ImageViewType::e2D, format, componentMapping, subResourceRange ); - imageViews.push_back( device->createImageViewUnique( imageViewCreateInfo ) ); + imageViews.push_back( device.createImageView( imageViewCreateInfo ) ); } - // Note: No need to explicitly destroy the ImageViews or the swapChain, as the corresponding destroy - // functions are called by the destructor of the UniqueImageView and the UniqueSwapChainKHR on leaving this scope. 
+ // destroy the imageViews, the swapChain,and the surface + for ( auto & imageView : imageViews ) + { + device.destroyImageView( imageView ); + } + device.destroySwapchainKHR( swapChain ); + instance.destroySurfaceKHR( surface ); /* VULKAN_HPP_KEY_END */ + + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp b/samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp index b9fa4b2..d4a9834 100644 --- a/samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp +++ b/samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp @@ -27,18 +27,19 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); /* VULKAN_HPP_KEY_START */ @@ -68,10 +69,10 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SampleCountFlagBits::e1, tiling, 
vk::ImageUsageFlagBits::eDepthStencilAttachment ); - vk::UniqueImage depthImage = device->createImageUnique( imageCreateInfo ); + vk::Image depthImage = device.createImage( imageCreateInfo ); vk::PhysicalDeviceMemoryProperties memoryProperties = physicalDevice.getMemoryProperties(); - vk::MemoryRequirements memoryRequirements = device->getImageMemoryRequirements( depthImage.get() ); + vk::MemoryRequirements memoryRequirements = device.getImageMemoryRequirements( depthImage ); uint32_t typeBits = memoryRequirements.memoryTypeBits; uint32_t typeIndex = uint32_t( ~0 ); for ( uint32_t i = 0; i < memoryProperties.memoryTypeCount; i++ ) @@ -86,22 +87,32 @@ int main( int /*argc*/, char ** /*argv*/ ) typeBits >>= 1; } assert( typeIndex != uint32_t( ~0 ) ); - vk::UniqueDeviceMemory depthMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, typeIndex ) ); + vk::DeviceMemory depthMemory = + device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, typeIndex ) ); - device->bindImageMemory( depthImage.get(), depthMemory.get(), 0 ); + device.bindImageMemory( depthImage, depthMemory, 0 ); vk::ComponentMapping componentMapping( vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); vk::ImageSubresourceRange subResourceRange( vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1 ); - vk::UniqueImageView depthView = device->createImageViewUnique( vk::ImageViewCreateInfo( vk::ImageViewCreateFlags(), - depthImage.get(), - vk::ImageViewType::e2D, - depthFormat, - componentMapping, - subResourceRange ) ); + vk::ImageView depthView = device.createImageView( vk::ImageViewCreateInfo( vk::ImageViewCreateFlags(), + depthImage, + vk::ImageViewType::e2D, + depthFormat, + componentMapping, + subResourceRange ) ); + + // destroy depthView, depthMemory, and depthImage + device.destroyImageView( depthView ); + device.freeMemory( depthMemory ); + device.destroyImage( depthImage ); /* VULKAN_HPP_KEY_END */ + + 
device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp b/samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp index 7fc8be5..5cdf34e 100644 --- a/samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp +++ b/samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp @@ -16,9 +16,9 @@ // Initialize a uniform buffer #if defined( _MSC_VER ) -# pragma warning( disable : 4127 ) // disable warning 4127: conditional expression is constant -# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed - // to get glm/detail/type_vec?.hpp without warnings +# pragma warning( disable : 4127 ) // disable warning 4127: conditional expression is constant +# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed + // to get glm/detail/type_vec?.hpp without warnings #elif defined( __GNUC__ ) // don't know how to switch off that warning here #else @@ -30,7 +30,7 @@ #include -# define GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS #include static char const * AppName = "07_InitUniformBuffer"; @@ -40,15 +40,17 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); - vk::UniqueDevice device 
= vk::su::createDevice( - physicalDevice, vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ) ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); /* VULKAN_HPP_KEY_START */ @@ -56,46 +58,40 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 view = glm::lookAt( glm::vec3( -5.0f, 3.0f, -10.0f ), glm::vec3( 0.0f, 0.0f, 0.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ) ); glm::mat4x4 projection = glm::perspective( glm::radians( 45.0f ), 1.0f, 0.1f, 100.0f ); - glm::mat4x4 clip = glm::mat4x4( 1.0f, - 0.0f, - 0.0f, - 0.0f, - 0.0f, - -1.0f, - 0.0f, - 0.0f, - 0.0f, - 0.0f, - 0.5f, - 0.0f, - 0.0f, - 0.0f, - 0.5f, - 1.0f ); // vulkan clip space has inverted y and half z ! - glm::mat4x4 mvpc = clip * projection * view * model; + // clang-format off + glm::mat4x4 clip = glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, -1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.5f, 0.0f, + 0.0f, 0.0f, 0.5f, 1.0f ); // vulkan clip space has inverted y and half z ! 
+ // clang-format on + glm::mat4x4 mvpc = clip * projection * view * model; - vk::UniqueBuffer uniformDataBuffer = device->createBufferUnique( + vk::Buffer uniformDataBuffer = device.createBuffer( vk::BufferCreateInfo( vk::BufferCreateFlags(), sizeof( mvpc ), vk::BufferUsageFlagBits::eUniformBuffer ) ); - vk::MemoryRequirements memoryRequirements = device->getBufferMemoryRequirements( uniformDataBuffer.get() ); + vk::MemoryRequirements memoryRequirements = device.getBufferMemoryRequirements( uniformDataBuffer ); uint32_t typeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); - vk::UniqueDeviceMemory uniformDataMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, typeIndex ) ); + vk::DeviceMemory uniformDataMemory = + device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, typeIndex ) ); - uint8_t * pData = - static_cast( device->mapMemory( uniformDataMemory.get(), 0, memoryRequirements.size ) ); + uint8_t * pData = static_cast( device.mapMemory( uniformDataMemory, 0, memoryRequirements.size ) ); memcpy( pData, &mvpc, sizeof( mvpc ) ); - device->unmapMemory( uniformDataMemory.get() ); + device.unmapMemory( uniformDataMemory ); - device->bindBufferMemory( uniformDataBuffer.get(), uniformDataMemory.get(), 0 ); + device.bindBufferMemory( uniformDataBuffer, uniformDataMemory, 0 ); - // Note: No need to explicitly destroy the memory or the buffer, as the corresponding destroy function is - // called by the destructor of the UniqueMemory or UniqueBuffer, respectively, on leaving this scope. 
+ // free device memory and destroy buffer + device.freeMemory( uniformDataMemory ); + device.destroyBuffer( uniformDataBuffer ); /* VULKAN_HPP_KEY_END */ + + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp b/samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp index ccf81ee..0c6fd1d 100644 --- a/samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp +++ b/samples/08_InitPipelineLayout/08_InitPipelineLayout.cpp @@ -27,33 +27,39 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); - vk::UniqueDevice device = vk::su::createDevice( - physicalDevice, vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ) ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); /* VULKAN_HPP_KEY_START */ // create a DescriptorSetLayout vk::DescriptorSetLayoutBinding descriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = device->createDescriptorSetLayoutUnique( + vk::DescriptorSetLayout descriptorSetLayout = device.createDescriptorSetLayout( 
vk::DescriptorSetLayoutCreateInfo( vk::DescriptorSetLayoutCreateFlags(), descriptorSetLayoutBinding ) ); // create a PipelineLayout using that DescriptorSetLayout - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - // Note: No need to explicitly destroy the layouts, as the corresponding destroy function is - // called by the destructor of the UniqueDescriptorSetLayout or UniquePipelineLayout, respectively, on leaving this - // scope. + // destroy the pipelineLayout and the descriptorSetLayout + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); /* VULKAN_HPP_KEY_END */ + + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp b/samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp index e7da2c8..3a12ace 100644 --- a/samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp +++ b/samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp @@ -40,42 +40,52 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); - 
vk::UniqueDevice device = vk::su::createDevice( - physicalDevice, vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ) ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( vk::Extent2D( 0, 0 ) ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( vk::Extent2D( 0, 0 ) ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); /* VULKAN_HPP_KEY_START */ // create a descriptor pool - vk::DescriptorPoolSize poolSize( vk::DescriptorType::eUniformBuffer, 1 ); - vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( + vk::DescriptorPoolSize poolSize( vk::DescriptorType::eUniformBuffer, 1 ); + vk::DescriptorPool descriptorPool = device.createDescriptorPool( vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSize ) ); // allocate a descriptor set - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); - vk::DescriptorBufferInfo descriptorBufferInfo( 
uniformBufferData.buffer.get(), 0, sizeof( glm::mat4x4 ) ); - device->updateDescriptorSets( - vk::WriteDescriptorSet( descriptorSet.get(), 0, 0, vk::DescriptorType::eUniformBuffer, {}, descriptorBufferInfo ), - {} ); + vk::DescriptorBufferInfo descriptorBufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::WriteDescriptorSet writeDescriptorSet( + descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, descriptorBufferInfo ); + device.updateDescriptorSets( writeDescriptorSet, nullptr ); + + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); /* VULKAN_HPP_KEY_END */ + + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/10_InitRenderPass/10_InitRenderPass.cpp b/samples/10_InitRenderPass/10_InitRenderPass.cpp index e259b58..2613d50 100644 --- a/samples/10_InitRenderPass/10_InitRenderPass.cpp +++ b/samples/10_InitRenderPass/10_InitRenderPass.cpp @@ -16,8 +16,8 @@ // Initialize a render pass #if defined( _MSC_VER ) -# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed - // to get glm/detail/type_vec?.hpp without warnings +# pragma warning( disable : 4201 ) // disable warning C4201: nonstandard extension used: nameless struct/union; needed + // to get glm/detail/type_vec?.hpp without warnings #elif defined( __GNUC__ ) // don't know how to switch off that warning here #else @@ -39,22 +39,23 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT 
debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 64, 64 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); vk::Format colorFormat = - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format; + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::Format depthFormat = vk::Format::eD16Unorm; /* VULKAN_HPP_KEY_START */ @@ -84,13 +85,17 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubpassDescription subpass( vk::SubpassDescriptionFlags(), vk::PipelineBindPoint::eGraphics, {}, colorReference, {}, &depthReference ); - vk::UniqueRenderPass renderPass = device->createRenderPassUnique( + vk::RenderPass renderPass = device.createRenderPass( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachmentDescriptions, subpass ) ); - // Note: No need to explicitly destroy the RenderPass or the Semaphore, as the corresponding destroy - // functions are called by the destructor of the UniqueRenderPass and the UniqueSemaphore on leaving this scope. 
+ device.destroyRenderPass( renderPass ); /* VULKAN_HPP_KEY_END */ + + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/11_InitShaders/11_InitShaders.cpp b/samples/11_InitShaders/11_InitShaders.cpp index 9c7d154..2593751 100644 --- a/samples/11_InitShaders/11_InitShaders.cpp +++ b/samples/11_InitShaders/11_InitShaders.cpp @@ -29,15 +29,17 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); - vk::UniqueDevice device = vk::su::createDevice( - physicalDevice, vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ) ); + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); /* VULKAN_HPP_KEY_START */ @@ -48,21 +50,25 @@ int main( int /*argc*/, char ** /*argv*/ ) assert( ok ); vk::ShaderModuleCreateInfo vertexShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), vertexShaderSPV ); - vk::UniqueShaderModule vertexShaderModule = device->createShaderModuleUnique( vertexShaderModuleCreateInfo ); + vk::ShaderModule vertexShaderModule = device.createShaderModule( 
vertexShaderModuleCreateInfo ); std::vector fragmentShaderSPV; ok = vk::su::GLSLtoSPV( vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C, fragmentShaderSPV ); assert( ok ); vk::ShaderModuleCreateInfo fragmentShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), fragmentShaderSPV ); - vk::UniqueShaderModule fragmentShaderModule = device->createShaderModuleUnique( fragmentShaderModuleCreateInfo ); + vk::ShaderModule fragmentShaderModule = device.createShaderModule( fragmentShaderModuleCreateInfo ); glslang::FinalizeProcess(); - // Note: No need to explicitly destroy the ShaderModules, as the corresponding destroy - // functions are called by the destructor of the UniqueShaderModule on leaving this scope. + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); /* VULKAN_HPP_KEY_END */ + + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp b/samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp index 0103f50..016c585 100644 --- a/samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp +++ b/samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp @@ -27,57 +27,64 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = 
instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 64, 64 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + nullptr, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent ); - vk::UniqueRenderPass renderPass = - vk::su::createRenderPass( device, swapChainData.colorFormat, depthBufferData.format ); + vk::RenderPass renderPass = vk::su::createRenderPass( device, swapChainData.colorFormat, depthBufferData.format ); /* VULKAN_KEY_START */ std::array attachments; - attachments[1] = depthBufferData.imageView.get(); + attachments[1] = depthBufferData.imageView; - std::vector framebuffers; + vk::FramebufferCreateInfo framebufferCreateInfo( + vk::FramebufferCreateFlags(), renderPass, attachments, surfaceData.extent.width, surfaceData.extent.height, 1 ); + std::vector framebuffers; framebuffers.reserve( swapChainData.imageViews.size() ); - for ( auto const & view : swapChainData.imageViews ) + for ( auto const & imageView : swapChainData.imageViews ) { - attachments[0] = view.get(); - framebuffers.push_back( device->createFramebufferUnique( vk::FramebufferCreateInfo( vk::FramebufferCreateFlags(), - renderPass.get(), - attachments, - surfaceData.extent.width, - surfaceData.extent.height, - 1 ) ) ); + 
attachments[0] = imageView; + framebuffers.push_back( device.createFramebuffer( framebufferCreateInfo ) ); } - // Note: No need to explicitly destroy the Framebuffers, as the destroy functions are called by the destructor of - // the UniqueFramebuffer on leaving this scope. + for ( auto const & framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } /* VULKAN_KEY_END */ + + device.destroyRenderPass( renderPass ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp b/samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp index 68e98c3..5ee0da4 100644 --- a/samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp +++ b/samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp @@ -28,73 +28,72 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 64, 64 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + 
vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent ); - vk::UniqueRenderPass renderPass = - vk::su::createRenderPass( device, swapChainData.colorFormat, depthBufferData.format ); + vk::RenderPass renderPass = vk::su::createRenderPass( device, swapChainData.colorFormat, depthBufferData.format ); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); /* VULKAN_KEY_START */ // create a vertex buffer for some vertex and color data - vk::UniqueBuffer 
vertexBuffer = device->createBufferUnique( vk::BufferCreateInfo( + vk::Buffer vertexBuffer = device.createBuffer( vk::BufferCreateInfo( vk::BufferCreateFlags(), sizeof( coloredCubeData ), vk::BufferUsageFlagBits::eVertexBuffer ) ); // allocate device memory for that buffer - vk::MemoryRequirements memoryRequirements = device->getBufferMemoryRequirements( vertexBuffer.get() ); + vk::MemoryRequirements memoryRequirements = device.getBufferMemoryRequirements( vertexBuffer ); uint32_t memoryTypeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); - vk::UniqueDeviceMemory deviceMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + vk::DeviceMemory deviceMemory = + device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); // copy the vertex and color data into that device memory - uint8_t * pData = static_cast( device->mapMemory( deviceMemory.get(), 0, memoryRequirements.size ) ); + uint8_t * pData = static_cast( device.mapMemory( deviceMemory, 0, memoryRequirements.size ) ); memcpy( pData, coloredCubeData, sizeof( coloredCubeData ) ); - device->unmapMemory( deviceMemory.get() ); + device.unmapMemory( deviceMemory ); // and bind the device memory to the vertex buffer - device->bindBufferMemory( vertexBuffer.get(), deviceMemory.get(), 0 ); + device.bindBufferMemory( vertexBuffer, deviceMemory, 0 ); - vk::UniqueSemaphore imageAcquiredSemaphore = - device->createSemaphoreUnique( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = + device.createSemaphore( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); + vk::ResultValue currentBuffer 
= device.acquireNextImageKHR( + swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); @@ -102,25 +101,39 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindVertexBuffers( 0, *vertexBuffer, { 0 } ); + commandBuffer.bindVertexBuffers( 0, vertexBuffer, { 0 } ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.endRenderPass(); + commandBuffer.end(); vk::su::submitAndWait( device, graphicsQueue, commandBuffer ); - // Note: No need to explicitly destroy the vertexBuffer, deviceMemory, or semaphore, as the destroy functions are - // called by the destructor of the UniqueBuffer, UniqueDeviceMemory, and UniqueSemaphore, respectively, on leaving - // this scope. 
+ device.destroySemaphore( imageAcquiredSemaphore ); + device.freeMemory( deviceMemory ); + device.destroyBuffer( vertexBuffer ); /* VULKAN_KEY_END */ + + swapChainData.clear( device ); + depthBufferData.clear( device ); + for ( auto const & framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyRenderPass( renderPass ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/14_InitPipeline/14_InitPipeline.cpp b/samples/14_InitPipeline/14_InitPipeline.cpp index b229d63..57b0092 100644 --- a/samples/14_InitPipeline/14_InitPipeline.cpp +++ b/samples/14_InitPipeline/14_InitPipeline.cpp @@ -41,34 +41,35 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = 
vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, vk::Format::eD16Unorm ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); glslang::FinalizeProcess(); @@ -76,9 +77,9 @@ int main( int /*argc*/, char ** /*argv*/ ) std::array pipelineShaderStageCreateInfos = { vk::PipelineShaderStageCreateInfo( - vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, vertexShaderModule.get(), "main" ), + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, vertexShaderModule, "main" ), vk::PipelineShaderStageCreateInfo( - vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule.get(), "main" ) + 
vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule, "main" ) }; vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( coloredCubeData[0] ) ); @@ -167,14 +168,13 @@ int main( int /*argc*/, char ** /*argv*/ ) &pipelineDepthStencilStateCreateInfo, // pDepthStencilState &pipelineColorBlendStateCreateInfo, // pColorBlendState &pipelineDynamicStateCreateInfo, // pDynamicState - pipelineLayout.get(), // layout - renderPass.get() // renderPass + pipelineLayout, // layout + renderPass // renderPass ); - vk::Result result; - vk::UniquePipeline pipeline; - std::tie( result, pipeline ) = - device->createGraphicsPipelineUnique( nullptr, graphicsPipelineCreateInfo ).asTuple(); + vk::Result result; + vk::Pipeline pipeline; + std::tie( result, pipeline ) = device.createGraphicsPipeline( nullptr, graphicsPipelineCreateInfo ); switch ( result ) { case vk::Result::eSuccess: break; @@ -184,7 +184,19 @@ int main( int /*argc*/, char ** /*argv*/ ) default: assert( false ); // should never happen } + device.destroyPipeline( pipeline ); + /* VULKAN_KEY_END */ + + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + device.destroyRenderPass( renderPass ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/15_DrawCube/15_DrawCube.cpp b/samples/15_DrawCube/15_DrawCube.cpp index e3b9ae3..b4d2fa8 100644 --- a/samples/15_DrawCube/15_DrawCube.cpp +++ b/samples/15_DrawCube/15_DrawCube.cpp @@ -32,36 +32,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = 
vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( 
physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -69,27 +69,27 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); - vk::UniqueShaderModule fragmentShaderModule = + 
vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -99,75 +99,74 @@ int main( int /*argc*/, char ** /*argv*/ ) coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); - vk::UniqueDescriptorPool descriptorPool = + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} } }, {} ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = vk::su::createGraphicsPipeline( + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( coloredCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, renderPass ); + /* 
VULKAN_KEY_START */ // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - 
commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -178,9 +177,34 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + device.waitIdle(); + + 
device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + /* VULKAN_KEY_END */ - device->waitIdle(); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp b/samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp index 455e6d5..9fb121a 100644 --- a/samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp +++ b/samples/16_Vulkan_1_1/16_Vulkan_1_1.cpp @@ -63,14 +63,15 @@ int main( int /*argc*/, char ** /*argv*/ ) ( loader_major_version == desiredMajorVersion && loader_minor_version >= desiredMinorVersion ) ) { // Create the instance - vk::UniqueInstance instance = + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions(), desiredVersion ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // Get the list of physical devices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = 
instance.enumeratePhysicalDevices(); // Go through the list of physical devices and select only those that are capable of running the API version we // want. @@ -87,6 +88,9 @@ int main( int /*argc*/, char ** /*argv*/ ) usingMajorVersion = desiredMajorVersion; usingMinorVersion = desiredMinorVersion; } + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } usingVersionString += std::to_string( usingMajorVersion ); diff --git a/samples/CopyBlitImage/CopyBlitImage.cpp b/samples/CopyBlitImage/CopyBlitImage.cpp index a22f993..0d2f671 100644 --- a/samples/CopyBlitImage/CopyBlitImage.cpp +++ b/samples/CopyBlitImage/CopyBlitImage.cpp @@ -27,17 +27,17 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 640, 640 ) ); - vk::SurfaceCapabilitiesKHR surfaceCapabilities = - physicalDevice.getSurfaceCapabilitiesKHR( surfaceData.surface.get() ); + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surfaceData.surface ); if ( !( surfaceCapabilities.supportedUsageFlags & vk::ImageUsageFlagBits::eTransferDst ) ) { std::cout << "Surface cannot be destination of blit - abort \n"; @@ -45,26 +45,25 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::pair graphicsAndPresentQueueFamilyIndex = 
- vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferDst, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -74,18 +73,18 @@ int main( int /*argc*/, char ** /*argv*/ ) assert( ( formatProperties.linearTilingFeatures & vk::FormatFeatureFlagBits::eBlitSrc ) && "Format cannot be used as transfer source" ); - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); + vk::Semaphore imageAcquiredSemaphore = 
device.createSemaphore( vk::SemaphoreCreateInfo() ); // Get the index of the next available swapchain image: - vk::ResultValue nextImage = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::ResultValue nextImage = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( nextImage.result == vk::Result::eSuccess ); assert( nextImage.value < swapChainData.images.size() ); - uint32_t currentBuffer = nextImage.value; + uint32_t imageIndex = nextImage.value; - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); vk::su::setImageLayout( commandBuffer, - swapChainData.images[currentBuffer], + swapChainData.images[imageIndex], swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); @@ -100,37 +99,36 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SampleCountFlagBits::e1, vk::ImageTiling::eLinear, vk::ImageUsageFlagBits::eTransferSrc ); - vk::UniqueImage blitSourceImage = device->createImageUnique( imageCreateInfo ); + vk::Image blitSourceImage = device.createImage( imageCreateInfo ); vk::PhysicalDeviceMemoryProperties memoryProperties = physicalDevice.getMemoryProperties(); - vk::MemoryRequirements memoryRequirements = device->getImageMemoryRequirements( blitSourceImage.get() ); + vk::MemoryRequirements memoryRequirements = device.getImageMemoryRequirements( blitSourceImage ); uint32_t memoryTypeIndex = vk::su::findMemoryType( memoryProperties, memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eHostVisible ); - vk::UniqueDeviceMemory deviceMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); - device->bindImageMemory( blitSourceImage.get(), deviceMemory.get(), 0 ); + vk::DeviceMemory deviceMemory = + device.allocateMemory( vk::MemoryAllocateInfo( 
memoryRequirements.size, memoryTypeIndex ) ); + device.bindImageMemory( blitSourceImage, deviceMemory, 0 ); vk::su::setImageLayout( commandBuffer, - blitSourceImage.get(), + blitSourceImage, swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eGeneral ); - commandBuffer->end(); + commandBuffer.end(); /* Queue the command buffer for execution */ - vk::UniqueFence commandFence = device->createFenceUnique( {} ); + vk::Fence commandFence = device.createFence( {} ); vk::PipelineStageFlags pipeStageFlags( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - graphicsQueue.submit( vk::SubmitInfo( *imageAcquiredSemaphore, pipeStageFlags, *commandBuffer ), - commandFence.get() ); + graphicsQueue.submit( vk::SubmitInfo( imageAcquiredSemaphore, pipeStageFlags, commandBuffer ), commandFence ); /* Make sure command buffer is finished before mapping */ - while ( device->waitForFences( commandFence.get(), true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) + while ( device.waitForFences( commandFence, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) ; unsigned char * pImageMemory = - static_cast( device->mapMemory( deviceMemory.get(), 0, memoryRequirements.size ) ); + static_cast( device.mapMemory( deviceMemory, 0, memoryRequirements.size ) ); // Checkerboard of 8x8 pixel squares for ( uint32_t row = 0; row < surfaceData.extent.height; row++ ) @@ -147,20 +145,20 @@ int main( int /*argc*/, char ** /*argv*/ ) } // Flush the mapped memory and then unmap it. 
Assume it isn't coherent since we didn't really confirm - device->flushMappedMemoryRanges( vk::MappedMemoryRange( deviceMemory.get(), 0, memoryRequirements.size ) ); - device->unmapMemory( deviceMemory.get() ); + device.flushMappedMemoryRanges( vk::MappedMemoryRange( deviceMemory, 0, memoryRequirements.size ) ); + device.unmapMemory( deviceMemory ); - commandBuffer->reset( {} ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.reset( {} ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); // Intend to blit from this image, set the layout accordingly vk::su::setImageLayout( commandBuffer, - blitSourceImage.get(), + blitSourceImage, swapChainData.colorFormat, vk::ImageLayout::eGeneral, vk::ImageLayout::eTransferSrcOptimal ); - vk::Image blitDestinationImage = swapChainData.images[currentBuffer]; + vk::Image blitDestinationImage = swapChainData.images[imageIndex]; // Do a 32x32 blit to all of the dst image - should get big squares vk::ImageSubresourceLayers imageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ); @@ -169,12 +167,12 @@ int main( int /*argc*/, char ** /*argv*/ ) { { vk::Offset3D( 0, 0, 0 ), vk::Offset3D( 32, 32, 1 ) } }, imageSubresourceLayers, { { vk::Offset3D( 0, 0, 0 ), vk::Offset3D( surfaceData.extent.width, surfaceData.extent.height, 1 ) } } ); - commandBuffer->blitImage( blitSourceImage.get(), - vk::ImageLayout::eTransferSrcOptimal, - blitDestinationImage, - vk::ImageLayout::eTransferDstOptimal, - imageBlit, - vk::Filter::eLinear ); + commandBuffer.blitImage( blitSourceImage, + vk::ImageLayout::eTransferSrcOptimal, + blitDestinationImage, + vk::ImageLayout::eTransferDstOptimal, + imageBlit, + vk::Filter::eLinear ); // Use a barrier to make sure the blit is finished before the copy starts vk::ImageMemoryBarrier memoryBarrier( vk::AccessFlagBits::eTransferWrite, @@ -185,12 +183,12 @@ int main( int /*argc*/, char ** /*argv*/ ) VK_QUEUE_FAMILY_IGNORED, blitDestinationImage, vk::ImageSubresourceRange( 
vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); - commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eTransfer, - vk::PipelineStageFlagBits::eTransfer, - vk::DependencyFlags(), - nullptr, - nullptr, - memoryBarrier ); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTransfer, + vk::DependencyFlags(), + nullptr, + nullptr, + memoryBarrier ); // Do a image copy to part of the dst image - checks should stay small vk::ImageCopy imageCopy( imageSubresourceLayers, @@ -198,11 +196,11 @@ int main( int /*argc*/, char ** /*argv*/ ) imageSubresourceLayers, vk::Offset3D( 256, 256, 0 ), vk::Extent3D( 128, 128, 1 ) ); - commandBuffer->copyImage( blitSourceImage.get(), - vk::ImageLayout::eTransferSrcOptimal, - blitDestinationImage, - vk::ImageLayout::eTransferDstOptimal, - imageCopy ); + commandBuffer.copyImage( blitSourceImage, + vk::ImageLayout::eTransferSrcOptimal, + blitDestinationImage, + vk::ImageLayout::eTransferDstOptimal, + imageCopy ); vk::ImageMemoryBarrier prePresentBarrier( vk::AccessFlagBits::eTransferWrite, @@ -211,27 +209,26 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::ImageLayout::ePresentSrcKHR, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, - swapChainData.images[currentBuffer], + swapChainData.images[imageIndex], vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); - commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eTransfer, - vk::PipelineStageFlagBits::eTopOfPipe, - vk::DependencyFlags(), - nullptr, - nullptr, - prePresentBarrier ); - commandBuffer->end(); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTopOfPipe, + vk::DependencyFlags(), + nullptr, + nullptr, + prePresentBarrier ); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( {} ); - graphicsQueue.submit( vk::SubmitInfo( {}, {}, *commandBuffer ), drawFence.get() ); + vk::Fence drawFence = device.createFence( {} ); + 
graphicsQueue.submit( vk::SubmitInfo( {}, {}, commandBuffer ), drawFence ); graphicsQueue.waitIdle(); /* Make sure command buffer is finished before presenting */ - while ( device->waitForFences( drawFence.get(), true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) + while ( device.waitForFences( drawFence, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) ; /* Now present the image in the window */ - vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer, {} ) ); + vk::Result result = presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, imageIndex, {} ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -243,6 +240,19 @@ int main( int /*argc*/, char ** /*argv*/ ) std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); /* VULKAN_KEY_END */ + + device.destroyFence( drawFence ); + device.destroyFence( commandFence ); + device.freeMemory( deviceMemory ); + device.destroyImage( blitSourceImage ); + device.destroySemaphore( imageAcquiredSemaphore ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp b/samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp index 76522b0..f8fc3a3 100644 --- a/samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp +++ b/samples/CreateDebugUtilsMessenger/CreateDebugUtilsMessenger.cpp @@ -114,10 +114,10 @@ int main( int /*argc*/, char ** /*argv*/ ) std::vector props = vk::enumerateInstanceExtensionProperties(); - auto propsIterator = std::find_if( props.begin(), props.end(), []( vk::ExtensionProperties const & ep ) { + auto propertyIterator = std::find_if( props.begin(), 
props.end(), []( vk::ExtensionProperties const & ep ) { return strcmp( ep.extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME ) == 0; } ); - if ( propsIterator == props.end() ) + if ( propertyIterator == props.end() ) { std::cout << "Something went very wrong, cannot find " << VK_EXT_DEBUG_UTILS_EXTENSION_NAME << " extension" << std::endl; @@ -126,11 +126,11 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::ApplicationInfo applicationInfo( AppName, 1, EngineName, 1, VK_API_VERSION_1_1 ); const char * extensionName = VK_EXT_DEBUG_UTILS_EXTENSION_NAME; - vk::UniqueInstance instance = vk::createInstanceUnique( - vk::InstanceCreateInfo( vk::InstanceCreateFlags(), &applicationInfo, {}, extensionName ) ); + vk::Instance instance = + vk::createInstance( vk::InstanceCreateInfo( vk::InstanceCreateFlags(), &applicationInfo, {}, extensionName ) ); pfnVkCreateDebugUtilsMessengerEXT = - reinterpret_cast( instance->getProcAddr( "vkCreateDebugUtilsMessengerEXT" ) ); + reinterpret_cast( instance.getProcAddr( "vkCreateDebugUtilsMessengerEXT" ) ); if ( !pfnVkCreateDebugUtilsMessengerEXT ) { std::cout << "GetInstanceProcAddr: Unable to find pfnVkCreateDebugUtilsMessengerEXT function." << std::endl; @@ -138,7 +138,7 @@ int main( int /*argc*/, char ** /*argv*/ ) } pfnVkDestroyDebugUtilsMessengerEXT = reinterpret_cast( - instance->getProcAddr( "vkDestroyDebugUtilsMessengerEXT" ) ); + instance.getProcAddr( "vkDestroyDebugUtilsMessengerEXT" ) ); if ( !pfnVkDestroyDebugUtilsMessengerEXT ) { std::cout << "GetInstanceProcAddr: Unable to find pfnVkDestroyDebugUtilsMessengerEXT function." 
<< std::endl; @@ -150,9 +150,12 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = instance->createDebugUtilsMessengerEXTUnique( + vk::DebugUtilsMessengerEXT debugUtilsMessenger = instance.createDebugUtilsMessengerEXT( vk::DebugUtilsMessengerCreateInfoEXT( {}, severityFlags, messageTypeFlags, &debugMessageFunc ) ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); + /* VULKAN_KEY_END */ } catch ( vk::SystemError & err ) diff --git a/samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp b/samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp index 171bc3c..bc51d69 100644 --- a/samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp +++ b/samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp @@ -31,17 +31,18 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = instance.enumeratePhysicalDevices(); assert( !physicalDevices.empty() ); uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( physicalDevices[0].getQueueFamilyProperties() ); - vk::UniqueDevice device = vk::su::createDevice( physicalDevices[0], graphicsQueueFamilyIndex ); + vk::Device device = vk::su::createDevice( physicalDevices[0], graphicsQueueFamilyIndex ); // create an image 
vk::ImageCreateInfo imageCreateInfo( {}, @@ -53,15 +54,20 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SampleCountFlagBits::e1, vk::ImageTiling::eLinear, vk::ImageUsageFlagBits::eTransferSrc ); - vk::UniqueImage image = device->createImageUnique( imageCreateInfo ); + vk::Image image = device.createImage( imageCreateInfo ); /* VULKAN_KEY_START */ vk::DebugUtilsObjectNameInfoEXT debugUtilsObjectNameInfo( - vk::ObjectType::eImage, NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( VkImage, *image ), "Image name" ); - device->setDebugUtilsObjectNameEXT( debugUtilsObjectNameInfo ); + vk::ObjectType::eImage, NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( VkImage, image ), "Image name" ); + device.setDebugUtilsObjectNameEXT( debugUtilsObjectNameInfo ); /* VULKAN_KEY_END */ + + device.destroyImage( image ); + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/DrawTexturedCube/DrawTexturedCube.cpp b/samples/DrawTexturedCube/DrawTexturedCube.cpp index daaac0a..ddc16fb 100644 --- a/samples/DrawTexturedCube/DrawTexturedCube.cpp +++ b/samples/DrawTexturedCube/DrawTexturedCube.cpp @@ -32,36 +32,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) 
); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -69,34 +69,34 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::TextureData textureData( physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() 
); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = 
vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -106,21 +106,20 @@ int main( int /*argc*/, char ** /*argv*/ ) texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); - vk::UniqueDescriptorPool descriptorPool = vk::su::createDescriptorPool( + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} } }, textureData ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -130,51 +129,50 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( 
vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); - // commandBuffer->begin() has already been called above! + // commandBuffer.begin() has already been called above! std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( 
vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -187,7 +185,34 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_END */ - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + 
device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + textureData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/DynamicUniform/DynamicUniform.cpp b/samples/DynamicUniform/DynamicUniform.cpp index 0d5124c..2303faa 100644 --- a/samples/DynamicUniform/DynamicUniform.cpp +++ b/samples/DynamicUniform/DynamicUniform.cpp @@ -43,54 +43,54 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - 
vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + 
vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -115,23 +115,13 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 view = glm::lookAt( glm::vec3( 0.0f, 3.0f, -10.0f ), glm::vec3( 0.0f, 0.0f, 0.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ) ); glm::mat4x4 projection = glm::perspective( glm::radians( 45.0f ), 1.0f, 0.1f, 100.0f ); - glm::mat4x4 clip = glm::mat4x4( 1.0f, - 0.0f, - 0.0f, - 0.0f, - 0.0f, - -1.0f, - 0.0f, - 0.0f, - 0.0f, - 0.0f, - 0.5f, - 0.0f, - 0.0f, - 0.0f, - 0.5f, - 1.0f ); // vulkan clip space has inverted y and half z ! - mvpcs[0] = clip * projection * view * model; + // clang-format off + glm::mat4x4 clip = glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, -1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.5f, 0.0f, + 0.0f, 0.0f, 0.5f, 1.0f ); // vulkan clip space has inverted y and half z ! 
+ // clang-format on + mvpcs[0] = clip * projection * view * model; model = glm::translate( model, glm::vec3( -1.5f, 1.5f, -1.5f ) ); mvpcs[1] = clip * projection * view * model; @@ -148,27 +138,26 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcs, 2, bufferSize ); // create a DescriptorSetLayout with vk::DescriptorType::eUniformBufferDynamic - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBufferDynamic, 1, vk::ShaderStageFlagBits::eVertex } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); // create a DescriptorPool with vk::DescriptorType::eUniformBufferDynamic - vk::UniqueDescriptorPool descriptorPool = + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBufferDynamic, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBufferDynamic, uniformBufferData.buffer, {} } }, {} ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = vk::su::createGraphicsPipeline( + vk::PipelineCache pipelineCache = 
device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( coloredCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -176,61 +165,61 @@ int main( int /*argc*/, char ** /*argv*/ ) pipelineLayout, renderPass ); // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - 
commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); /* The first draw should use the first matrix in the buffer */ uint32_t dynamicOffset = 0; - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), dynamicOffset ); + commandBuffer.bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, dynamicOffset ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); // the second draw should use the second matrix in the buffer; dynamicOffset = (uint32_t)bufferSize; - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), dynamicOffset ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); + commandBuffer.bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, dynamicOffset ); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + 
vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -243,7 +232,33 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_END */ - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + 
instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp b/samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp index 2823b15..5e87670 100644 --- a/samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp +++ b/samples/EnableValidationWithCallback/EnableValidationWithCallback.cpp @@ -148,15 +148,15 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::ApplicationInfo applicationInfo( AppName, 1, EngineName, 1, VK_API_VERSION_1_1 ); vk::InstanceCreateInfo instanceCreateInfo( vk::InstanceCreateFlags(), &applicationInfo, instanceLayerNames, instanceExtensionNames ); - vk::UniqueInstance instance = vk::createInstanceUnique( instanceCreateInfo ); + vk::Instance instance = vk::createInstance( instanceCreateInfo ); #if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) // initialize function pointers for instance - VULKAN_HPP_DEFAULT_DISPATCHER.init( *instance ); + VULKAN_HPP_DEFAULT_DISPATCHER.init( instance ); #endif pfnVkCreateDebugUtilsMessengerEXT = - reinterpret_cast( instance->getProcAddr( "vkCreateDebugUtilsMessengerEXT" ) ); + reinterpret_cast( instance.getProcAddr( "vkCreateDebugUtilsMessengerEXT" ) ); if ( !pfnVkCreateDebugUtilsMessengerEXT ) { std::cout << "GetInstanceProcAddr: Unable to find pfnVkCreateDebugUtilsMessengerEXT function." << std::endl; @@ -164,7 +164,7 @@ int main( int /*argc*/, char ** /*argv*/ ) } pfnVkDestroyDebugUtilsMessengerEXT = reinterpret_cast( - instance->getProcAddr( "vkDestroyDebugUtilsMessengerEXT" ) ); + instance.getProcAddr( "vkDestroyDebugUtilsMessengerEXT" ) ); if ( !pfnVkDestroyDebugUtilsMessengerEXT ) { std::cout << "GetInstanceProcAddr: Unable to find pfnVkDestroyDebugUtilsMessengerEXT function." 
<< std::endl; @@ -176,35 +176,37 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = instance->createDebugUtilsMessengerEXTUnique( + vk::DebugUtilsMessengerEXT debugUtilsMessenger = instance.createDebugUtilsMessengerEXT( vk::DebugUtilsMessengerCreateInfoEXT( {}, severityFlags, messageTypeFlags, &debugMessageFunc ) ); - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); - std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); - assert( !queueFamilyProperties.empty() ); - - auto qfpIt = std::find_if( - queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { - return !!( qfp.queueFlags & vk::QueueFlagBits::eGraphics ); - } ); - assert( qfpIt != queueFamilyProperties.end() ); - uint32_t queueFamilyIndex = static_cast( std::distance( queueFamilyProperties.begin(), qfpIt ) ); + // get the index of the first queue family that supports graphics + uint32_t graphicsQueueFamilyIndex = + vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); float queuePriority = 0.0f; vk::DeviceQueueCreateInfo deviceQueueCreateInfo( - vk::DeviceQueueCreateFlags(), queueFamilyIndex, 1, &queuePriority ); - vk::UniqueDevice device = - physicalDevice.createDeviceUnique( vk::DeviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ) ); + vk::DeviceQueueCreateFlags(), graphicsQueueFamilyIndex, 1, &queuePriority ); + vk::Device device = + physicalDevice.createDevice( vk::DeviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ) ); - // Create a command pool (not a UniqueCommandPool, for testing purposes! 
+ // Create a CommandPool and don't destroy it, for testing purposes! vk::CommandPool commandPool = - device->createCommandPool( vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlags(), queueFamilyIndex ) ); + device.createCommandPool( vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlags(), graphicsQueueFamilyIndex ) ); - // The commandPool is not destroyed automatically (as it's not a UniqueCommandPool. +#if true + // The commandPool is not destroyed automatically // That is, the device is destroyed before the commmand pool and will trigger a validation error. - std::cout << "*** INTENTIONALLY calling vkDestroyDevice before destroying command pool ***\n"; + std::cout << "*** INTENTIONALLY destroying the Device before destroying a CommandPool ***\n"; std::cout << "*** The following error message is EXPECTED ***\n"; +#else + device.destroyCommandPool( commandPool ); +#endif + + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); /* VULKAN_KEY_END */ } diff --git a/samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp b/samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp index deb50dc..b1b7cba 100644 --- a/samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp +++ b/samples/EnumerateDevicesAdvanced/EnumerateDevicesAdvanced.cpp @@ -28,15 +28,16 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif /* VULKAN_HPP_KEY_START */ // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = 
instance.enumeratePhysicalDevices(); for ( auto const & physicalDevice : physicalDevices ) { @@ -63,6 +64,9 @@ int main( int /*argc*/, char ** /*argv*/ ) } /* VULKAN_HPP_KEY_END */ + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/Events/Events.cpp b/samples/Events/Events.cpp index 3ddcefa..16c1193 100644 --- a/samples/Events/Events.cpp +++ b/samples/Events/Events.cpp @@ -27,43 +27,43 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( physicalDevice.getQueueFamilyProperties() ); - vk::UniqueDevice device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsQueueFamilyIndex ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsQueueFamilyIndex ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsQueueFamilyIndex ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( 
vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsQueueFamilyIndex, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsQueueFamilyIndex, 0 ); /* VULKAN_KEY_START */ // Start with a trivial command buffer and make sure fence wait doesn't time out - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); - commandBuffer->setViewport( 0, vk::Viewport( 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 1.0f ) ); - commandBuffer->end(); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); + commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 1.0f ) ); + commandBuffer.end(); - vk::UniqueFence fence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence fence = device.createFence( vk::FenceCreateInfo() ); - vk::SubmitInfo submitInfo( {}, {}, *commandBuffer ); - graphicsQueue.submit( submitInfo, fence.get() ); + vk::SubmitInfo submitInfo( {}, {}, commandBuffer ); + graphicsQueue.submit( submitInfo, fence ); // Make sure timeout is long enough for a simple command buffer without waiting for an event vk::Result result; int timeouts = -1; do { - result = device->waitForFences( fence.get(), true, vk::su::FenceTimeout ); + result = device.waitForFences( fence, true, vk::su::FenceTimeout ); timeouts++; } while ( result == vk::Result::eTimeout ); assert( result == vk::Result::eSuccess ); @@ -74,25 +74,21 @@ int main( int /*argc*/, char ** /*argv*/ ) } // Now create an event and wait for it on the GPU - vk::UniqueEvent event = device->createEventUnique( vk::EventCreateInfo( vk::EventCreateFlags() ) ); + vk::Event event = device.createEvent( vk::EventCreateInfo( vk::EventCreateFlags() ) ); - commandBuffer->reset( vk::CommandBufferResetFlags() ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); - commandBuffer->waitEvents( event.get(), - vk::PipelineStageFlagBits::eHost, - 
vk::PipelineStageFlagBits::eBottomOfPipe, - nullptr, - nullptr, - nullptr ); - commandBuffer->end(); - device->resetFences( fence.get() ); + commandBuffer.reset( vk::CommandBufferResetFlags() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); + commandBuffer.waitEvents( + event, vk::PipelineStageFlagBits::eHost, vk::PipelineStageFlagBits::eBottomOfPipe, nullptr, nullptr, nullptr ); + commandBuffer.end(); + device.resetFences( fence ); // Note that stepping through this code in the debugger is a bad idea because the GPU can TDR waiting for the event. // Execute the code from vk::Queue::submit() through vk::Device::setEvent() without breakpoints - graphicsQueue.submit( submitInfo, fence.get() ); + graphicsQueue.submit( submitInfo, fence ); // We should timeout waiting for the fence because the GPU should be waiting on the event - result = device->waitForFences( fence.get(), true, vk::su::FenceTimeout ); + result = device.waitForFences( fence, true, vk::su::FenceTimeout ); if ( result != vk::Result::eTimeout ) { std::cout << "Didn't get expected timeout in vk::Device::waitForFences, exiting\n"; @@ -101,44 +97,53 @@ int main( int /*argc*/, char ** /*argv*/ ) // Set the event from the CPU and wait for the fence. 
// This should succeed since we set the event - device->setEvent( event.get() ); + device.setEvent( event ); do { - result = device->waitForFences( fence.get(), true, vk::su::FenceTimeout ); + result = device.waitForFences( fence, true, vk::su::FenceTimeout ); } while ( result == vk::Result::eTimeout ); assert( result == vk::Result::eSuccess ); - commandBuffer->reset( {} ); - device->resetFences( fence.get() ); - device->resetEvent( event.get() ); + commandBuffer.reset( {} ); + device.resetFences( fence ); + device.resetEvent( event ); // Now set the event from the GPU and wait on the CPU - commandBuffer->begin( vk::CommandBufferBeginInfo() ); - commandBuffer->setEvent( event.get(), vk::PipelineStageFlagBits::eBottomOfPipe ); - commandBuffer->end(); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); + commandBuffer.setEvent( event, vk::PipelineStageFlagBits::eBottomOfPipe ); + commandBuffer.end(); // Look for the event on the CPU. It should be vk::Result::eEventReset since we haven't sent the command buffer yet. 
- result = device->getEventStatus( event.get() ); + result = device.getEventStatus( event ); assert( result == vk::Result::eEventReset ); // Send the command buffer and loop waiting for the event - graphicsQueue.submit( submitInfo, fence.get() ); + graphicsQueue.submit( submitInfo, fence ); int polls = 0; do { - result = device->getEventStatus( event.get() ); + result = device.getEventStatus( event ); polls++; } while ( result != vk::Result::eEventSet ); printf( "%d polls to find the event set\n", polls ); do { - result = device->waitForFences( fence.get(), true, vk::su::FenceTimeout ); + result = device.waitForFences( fence, true, vk::su::FenceTimeout ); } while ( result == vk::Result::eTimeout ); assert( result == vk::Result::eSuccess ); + device.destroyEvent( event ); + device.destroyFence( fence ); + /* VULKAN_KEY_END */ + + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/ImmutableSampler/ImmutableSampler.cpp b/samples/ImmutableSampler/ImmutableSampler.cpp index efc65e9..fed8239 100644 --- a/samples/ImmutableSampler/ImmutableSampler.cpp +++ b/samples/ImmutableSampler/ImmutableSampler.cpp @@ -41,36 +41,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + 
vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -78,22 +78,22 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), 
vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -107,51 +107,49 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::TextureData textureData( physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() ); std::array bindings = { vk::DescriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ), vk::DescriptorSetLayoutBinding( - 1, vk::DescriptorType::eCombinedImageSampler, vk::ShaderStageFlagBits::eFragment, *textureData.textureSampler ) + 1, 
vk::DescriptorType::eCombinedImageSampler, vk::ShaderStageFlagBits::eFragment, textureData.sampler ) }; - vk::UniqueDescriptorSetLayout descriptorSetLayout = device->createDescriptorSetLayoutUnique( + vk::DescriptorSetLayout descriptorSetLayout = device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( vk::DescriptorSetLayoutCreateFlags(), bindings ) ); // Create pipeline layout - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); // Create a single pool to contain data for our descriptor set std::array poolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), vk::DescriptorPoolSize( vk::DescriptorType::eCombinedImageSampler, 1 ) }; - vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( + vk::DescriptorPool descriptorPool = device.createDescriptorPool( vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSizes ) ); // Populate descriptor sets - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); - vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer.get(), 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo imageInfo( textureData.textureSampler.get(), - textureData.imageData->imageView.get(), - vk::ImageLayout::eShaderReadOnlyOptimal ); - vk::WriteDescriptorSet writeDescriptorSets[2] = { - vk::WriteDescriptorSet( descriptorSet.get(), 0, 0, 
vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), - vk::WriteDescriptorSet( descriptorSet.get(), 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( + textureData.sampler, textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + std::array writeDescriptorSets = { + vk::WriteDescriptorSet( descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( descriptorSet, 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) }; - device->updateDescriptorSets( vk::ArrayProxy( 2, writeDescriptorSets ), nullptr ); + device.updateDescriptorSets( writeDescriptorSets, nullptr ); /* VULKAN_KEY_END */ - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -159,42 +157,41 @@ int main( int /*argc*/, char ** /*argv*/ ) pipelineLayout, renderPass ); - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( 
swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - 
commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); vk::su::submitAndWait( device, graphicsQueue, commandBuffer ); vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -205,7 +202,33 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); - device->waitIdle(); + device.waitIdle(); + + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + textureData.clear( device ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyRenderPass( renderPass ); + uniformBufferData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/InitTexture/InitTexture.cpp b/samples/InitTexture/InitTexture.cpp index 8552cdf..4e13ef4 100644 --- a/samples/InitTexture/InitTexture.cpp +++ b/samples/InitTexture/InitTexture.cpp @@ -41,28 +41,28 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - 
vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 50, 50 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 
0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); /* VULKAN_KEY_START */ @@ -72,66 +72,66 @@ int main( int /*argc*/, char ** /*argv*/ ) // See if we can use a linear tiled image for a texture, if not, we will need a staging buffer for the texture data bool needsStaging = !( formatProperties.linearTilingFeatures & vk::FormatFeatureFlagBits::eSampledImage ); - vk::UniqueImage image = device->createImageUnique( - vk::ImageCreateInfo( vk::ImageCreateFlags(), - vk::ImageType::e2D, - format, - vk::Extent3D( surfaceData.extent, 1 ), - 1, - 1, - vk::SampleCountFlagBits::e1, - needsStaging ? vk::ImageTiling::eOptimal : vk::ImageTiling::eLinear, - vk::ImageUsageFlagBits::eSampled | - ( needsStaging ? vk::ImageUsageFlagBits::eTransferDst : vk::ImageUsageFlagBits() ), - vk::SharingMode::eExclusive, - {}, - needsStaging ? vk::ImageLayout::eUndefined : vk::ImageLayout::ePreinitialized ) ); + vk::ImageCreateInfo imageCreateInfo( + vk::ImageCreateFlags(), + vk::ImageType::e2D, + format, + vk::Extent3D( surfaceData.extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + needsStaging ? vk::ImageTiling::eOptimal : vk::ImageTiling::eLinear, + vk::ImageUsageFlagBits::eSampled | + ( needsStaging ? vk::ImageUsageFlagBits::eTransferDst : vk::ImageUsageFlagBits() ), + vk::SharingMode::eExclusive, + {}, + needsStaging ? vk::ImageLayout::eUndefined : vk::ImageLayout::ePreinitialized ); + vk::Image image = device.createImage( imageCreateInfo ); - vk::MemoryRequirements memoryRequirements = device->getImageMemoryRequirements( image.get() ); + vk::MemoryRequirements memoryRequirements = device.getImageMemoryRequirements( image ); uint32_t memoryTypeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, needsStaging ? 
vk::MemoryPropertyFlags() - : ( vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ) ); + : ( vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ) ); // allocate memory - vk::UniqueDeviceMemory imageMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + vk::DeviceMemory imageMemory = + device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); // bind memory - device->bindImageMemory( image.get(), imageMemory.get(), 0 ); + device.bindImageMemory( image, imageMemory, 0 ); - vk::UniqueBuffer textureBuffer; - vk::UniqueDeviceMemory textureBufferMemory; + vk::Buffer textureBuffer; + vk::DeviceMemory textureBufferMemory; if ( needsStaging ) { // Need a staging buffer to map and copy texture into textureBuffer = - device->createBufferUnique( vk::BufferCreateInfo( vk::BufferCreateFlags(), - surfaceData.extent.width * surfaceData.extent.height * 4, - vk::BufferUsageFlagBits::eTransferSrc ) ); + device.createBuffer( vk::BufferCreateInfo( vk::BufferCreateFlags(), + surfaceData.extent.width * surfaceData.extent.height * 4, + vk::BufferUsageFlagBits::eTransferSrc ) ); - memoryRequirements = device->getBufferMemoryRequirements( textureBuffer.get() ); + memoryRequirements = device.getBufferMemoryRequirements( textureBuffer ); memoryTypeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); // allocate memory - textureBufferMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + textureBufferMemory = device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); // bind memory - device->bindBufferMemory( textureBuffer.get(), textureBufferMemory.get(), 0 ); + device.bindBufferMemory( textureBuffer, 
textureBufferMemory, 0 ); } else { vk::SubresourceLayout subresourceLayout = - device->getImageSubresourceLayout( image.get(), vk::ImageSubresource( vk::ImageAspectFlagBits::eColor ) ); + device.getImageSubresourceLayout( image, vk::ImageSubresource( vk::ImageAspectFlagBits::eColor ) ); } - void * data = device->mapMemory( - needsStaging ? textureBufferMemory.get() : imageMemory.get(), 0, memoryRequirements.size, vk::MemoryMapFlags() ); + void * data = device.mapMemory( + needsStaging ? textureBufferMemory : imageMemory, 0, memoryRequirements.size, vk::MemoryMapFlags() ); // Checkerboard of 16x16 pixel squares unsigned char * pImageMemory = static_cast( data ); @@ -148,68 +148,73 @@ int main( int /*argc*/, char ** /*argv*/ ) } } - device->unmapMemory( needsStaging ? textureBufferMemory.get() : imageMemory.get() ); + device.unmapMemory( needsStaging ? textureBufferMemory : imageMemory ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); if ( needsStaging ) { // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal vk::su::setImageLayout( - commandBuffer, image.get(), format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); + commandBuffer, image, format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); vk::BufferImageCopy copyRegion( 0, surfaceData.extent.width, surfaceData.extent.height, vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), vk::Offset3D( 0, 0, 0 ), vk::Extent3D( surfaceData.extent, 1 ) ); - commandBuffer->copyBufferToImage( - textureBuffer.get(), image.get(), vk::ImageLayout::eTransferDstOptimal, copyRegion ); + commandBuffer.copyBufferToImage( textureBuffer, image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); // Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY - vk::su::setImageLayout( commandBuffer, - image.get(), - format, - 
vk::ImageLayout::eTransferDstOptimal, - vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::su::setImageLayout( + commandBuffer, image, format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); } else { // If we can use the linear tiled image as a texture, just do it vk::su::setImageLayout( - commandBuffer, image.get(), format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); + commandBuffer, image, format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); } - commandBuffer->end(); + commandBuffer.end(); vk::su::submitAndWait( device, graphicsQueue, commandBuffer ); - vk::UniqueSampler sampler = - device->createSamplerUnique( vk::SamplerCreateInfo( vk::SamplerCreateFlags(), - vk::Filter::eNearest, - vk::Filter::eNearest, - vk::SamplerMipmapMode::eNearest, - vk::SamplerAddressMode::eClampToEdge, - vk::SamplerAddressMode::eClampToEdge, - vk::SamplerAddressMode::eClampToEdge, - 0.0f, - false, - 1.0f, - false, - vk::CompareOp::eNever, - 0.0f, - 0.0f, - vk::BorderColor::eFloatOpaqueWhite ) ); + vk::SamplerCreateInfo samplerCreateInfo( vk::SamplerCreateFlags(), + vk::Filter::eNearest, + vk::Filter::eNearest, + vk::SamplerMipmapMode::eNearest, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + 0.0f, + false, + 1.0f, + false, + vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueWhite ); + vk::Sampler sampler = device.createSampler( samplerCreateInfo ); vk::ComponentMapping componentMapping( vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); - vk::ImageViewCreateInfo imageViewCreateInfo( - vk::ImageViewCreateFlags(), - image.get(), - vk::ImageViewType::e2D, - format, - componentMapping, - vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); - vk::UniqueImageView imageView = device->createImageViewUnique( imageViewCreateInfo ); 
+ vk::ImageSubresourceRange imageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, image, vk::ImageViewType::e2D, format, componentMapping, imageSubresourceRange ); + vk::ImageView imageView = device.createImageView( imageViewCreateInfo ); /* VULKAN_KEY_END */ + + device.destroyImageView( imageView ); + device.destroySampler( sampler ); + device.freeMemory( textureBufferMemory ); + device.destroyBuffer( textureBuffer ); + device.freeMemory( imageMemory ); + device.destroyImage( image ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/InputAttachment/InputAttachment.cpp b/samples/InputAttachment/InputAttachment.cpp index 90f1cf9..b8bb6fb 100644 --- a/samples/InputAttachment/InputAttachment.cpp +++ b/samples/InputAttachment/InputAttachment.cpp @@ -70,12 +70,13 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::FormatProperties formatProperties = physicalDevice.getFormatProperties( vk::Format::eR8G8B8A8Unorm ); if ( !( formatProperties.optimalTilingFeatures & 
vk::FormatFeatureFlagBits::eColorAttachment ) ) @@ -87,26 +88,25 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -118,34 +118,35 @@ int main( int /*argc*/, char ** /*argv*/ ) // Create the image that will be used as the input attachment 
// The image for the color attachment is the presentable image already created as part of the SwapChainData - vk::UniqueImage inputImage = device->createImageUnique( - vk::ImageCreateInfo( vk::ImageCreateFlags(), - vk::ImageType::e2D, - swapChainData.colorFormat, - vk::Extent3D( surfaceData.extent, 1 ), - 1, - 1, - vk::SampleCountFlagBits::e1, - vk::ImageTiling::eOptimal, - vk::ImageUsageFlagBits::eInputAttachment | vk::ImageUsageFlagBits::eTransferDst ) ); + vk::ImageCreateInfo imageCreateInfo( vk::ImageCreateFlags(), + vk::ImageType::e2D, + swapChainData.colorFormat, + vk::Extent3D( surfaceData.extent, 1 ), + 1, + 1, + vk::SampleCountFlagBits::e1, + vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eInputAttachment | + vk::ImageUsageFlagBits::eTransferDst ); + vk::Image inputImage = device.createImage( imageCreateInfo ); - vk::MemoryRequirements memoryRequirements = device->getImageMemoryRequirements( inputImage.get() ); + vk::MemoryRequirements memoryRequirements = device.getImageMemoryRequirements( inputImage ); uint32_t memoryTypeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlags() ); - vk::UniqueDeviceMemory inputMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); - device->bindImageMemory( inputImage.get(), inputMemory.get(), 0 ); + vk::DeviceMemory inputMemory = + device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + device.bindImageMemory( inputImage, inputMemory, 0 ); // Set the image layout to TRANSFER_DST_OPTIMAL to be ready for clear - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); vk::su::setImageLayout( commandBuffer, - inputImage.get(), + inputImage, swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); - commandBuffer->clearColorImage( - inputImage.get(), + 
commandBuffer.clearColorImage( + inputImage, vk::ImageLayout::eTransferDstOptimal, vk::ClearColorValue( std::array( { { 1.0f, 1.0f, 0.0f, 0.0f } } ) ), vk::ImageSubresourceRange( @@ -153,29 +154,25 @@ int main( int /*argc*/, char ** /*argv*/ ) // Set the image layout to SHADER_READONLY_OPTIMAL for use by the shaders vk::su::setImageLayout( commandBuffer, - inputImage.get(), + inputImage, swapChainData.colorFormat, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); vk::ComponentMapping componentMapping( vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA ); - vk::ImageViewCreateInfo imageViewCreateInfo( - vk::ImageViewCreateFlags(), - inputImage.get(), - vk::ImageViewType::e2D, - swapChainData.colorFormat, - componentMapping, - vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); - vk::UniqueImageView inputAttachmentView = device->createImageViewUnique( imageViewCreateInfo ); + vk::ImageSubresourceRange imageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ); + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, inputImage, vk::ImageViewType::e2D, swapChainData.colorFormat, componentMapping, imageSubresourceRange ); + vk::ImageView inputAttachmentView = device.createImageView( imageViewCreateInfo ); vk::DescriptorSetLayoutBinding layoutBinding( 0, vk::DescriptorType::eInputAttachment, 1, vk::ShaderStageFlagBits::eFragment ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = device->createDescriptorSetLayoutUnique( + vk::DescriptorSetLayout descriptorSetLayout = device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( vk::DescriptorSetLayoutCreateFlags(), layoutBinding ) ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + 
vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); std::array attachments = { // First attachment is the color attachment - clear at the beginning of the renderpass and transition layout to @@ -206,82 +203,77 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::AttachmentReference inputReference( 1, vk::ImageLayout::eShaderReadOnlyOptimal ); vk::SubpassDescription subPass( vk::SubpassDescriptionFlags(), vk::PipelineBindPoint::eGraphics, inputReference, colorReference ); - vk::UniqueRenderPass renderPass = - device->createRenderPassUnique( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachments, subPass ) ); + vk::RenderPass renderPass = + device.createRenderPass( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachments, subPass ) ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, inputAttachmentView, surfaceData.extent ); - vk::DescriptorPoolSize poolSize( vk::DescriptorType::eInputAttachment, 1 ); - vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( + vk::DescriptorPoolSize poolSize( vk::DescriptorType::eInputAttachment, 1 ); + vk::DescriptorPool descriptorPool = device.createDescriptorPool( vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSize ) ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() 
); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); - vk::DescriptorImageInfo inputImageInfo( - nullptr, inputAttachmentView.get(), vk::ImageLayout::eShaderReadOnlyOptimal ); - vk::WriteDescriptorSet writeDescriptorSet( - descriptorSet.get(), 0, 0, vk::DescriptorType::eInputAttachment, inputImageInfo ); - device->updateDescriptorSets( vk::ArrayProxy( 1, &writeDescriptorSet ), nullptr ); + vk::DescriptorImageInfo inputImageInfo( nullptr, inputAttachmentView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::WriteDescriptorSet writeDescriptorSet( + descriptorSet, 0, 0, vk::DescriptorType::eInputAttachment, inputImageInfo ); + device.updateDescriptorSets( writeDescriptorSet, nullptr ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = - vk::su::createGraphicsPipeline( device, - pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), - 0, - {}, - vk::FrontFace::eClockwise, - false, - pipelineLayout, - renderPass ); + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, + pipelineCache, + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), + 0, + {}, + vk::FrontFace::eClockwise, + false, + pipelineLayout, + renderPass ); - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); - vk::ResultValue nexImage = device->acquireNextImage2KHR( - vk::AcquireNextImageInfoKHR( swapChainData.swapChain.get(), UINT64_MAX, imageAcquiredSemaphore.get(), {}, 1 ) ); + 
vk::ResultValue nexImage = device.acquireNextImage2KHR( + vk::AcquireNextImageInfoKHR( swapChainData.swapChain, UINT64_MAX, imageAcquiredSemaphore, {}, 1 ) ); assert( nexImage.result == vk::Result::eSuccess ); uint32_t currentBuffer = nexImage.value; vk::ClearValue clearValue; clearValue.color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); - commandBuffer->beginRenderPass( vk::RenderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer].get(), - vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), - clearValue ), - vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.beginRenderPass( + vk::RenderPassBeginInfo( + renderPass, framebuffers[currentBuffer], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ), + vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); /* VULKAN_KEY_END */ vk::su::submitAndWait( device, graphicsQueue, 
commandBuffer ); - vk::Result result = presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer ) ); + vk::Result result = presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -291,6 +283,31 @@ int main( int /*argc*/, char ** /*argv*/ ) default: assert( false ); // an unexpected result is returned ! } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + device.destroyImageView( inputAttachmentView ); + device.freeMemory( inputMemory ); + device.destroyImage( inputImage ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp b/samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp index 5401c32..6572900 100644 --- a/samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp +++ b/samples/InstanceLayerExtensionProperties/InstanceLayerExtensionProperties.cpp @@ -47,7 +47,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { 
std::vector extensionProperties = vk::enumerateInstanceExtensionProperties( vk::Optional( layerProperty.layerName ) ); - propertyData.push_back( PropertyData( layerProperty, extensionProperties ) ); + propertyData.emplace_back( layerProperty, extensionProperties ); } /* VULKAN_KEY_END */ @@ -62,9 +62,10 @@ int main( int /*argc*/, char ** /*argv*/ ) for ( auto const & pd : propertyData ) { std::cout << pd.layerProperties.layerName << std::endl; + std::cout << "Layer Extensions: "; if ( pd.extensionProperties.empty() ) { - std::cout << "Layer Extension: None"; + std::cout << "None"; } else { diff --git a/samples/InstanceVersion/InstanceVersion.cpp b/samples/InstanceVersion/InstanceVersion.cpp index 7027dc0..b197eb7 100644 --- a/samples/InstanceVersion/InstanceVersion.cpp +++ b/samples/InstanceVersion/InstanceVersion.cpp @@ -33,7 +33,7 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ uint32_t apiVersion = vk::enumerateInstanceVersion(); - std::cout << "APIVersion = " << decodeAPIVersion( apiVersion ); + std::cout << "APIVersion = " << decodeAPIVersion( apiVersion ) << std::endl; /* VULKAN_KEY_END */ } diff --git a/samples/MultipleSets/MultipleSets.cpp b/samples/MultipleSets/MultipleSets.cpp index 2f7e450..0db298a 100644 --- a/samples/MultipleSets/MultipleSets.cpp +++ b/samples/MultipleSets/MultipleSets.cpp @@ -91,36 +91,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = 
instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -128,27 +128,27 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::TextureData textureData( 
physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -163,52 +163,49 @@ int main( int /*argc*/, char ** /*argv*/ ) // Create first layout to contain uniform buffer data vk::DescriptorSetLayoutBinding uniformBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex ); - vk::UniqueDescriptorSetLayout uniformLayout = 
device->createDescriptorSetLayoutUnique( + vk::DescriptorSetLayout uniformLayout = device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( vk::DescriptorSetLayoutCreateFlags(), uniformBinding ) ); // Create second layout containing combined sampler/image data vk::DescriptorSetLayoutBinding sampler2DBinding( 0, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eVertex ); - vk::UniqueDescriptorSetLayout samplerLayout = device->createDescriptorSetLayoutUnique( + vk::DescriptorSetLayout samplerLayout = device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( vk::DescriptorSetLayoutCreateFlags(), sampler2DBinding ) ); // Create pipeline layout with multiple descriptor sets - std::array descriptorSetLayouts = { { uniformLayout.get(), samplerLayout.get() } }; - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( + std::array descriptorSetLayouts = { { uniformLayout, samplerLayout } }; + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayouts ) ); // Create a single pool to contain data for our two descriptor sets std::array poolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), vk::DescriptorPoolSize( vk::DescriptorType::eCombinedImageSampler, 1 ) }; - vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( + vk::DescriptorPool descriptorPool = device.createDescriptorPool( vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 2, poolSizes ) ); // Populate descriptor sets - std::vector descriptorSets = device->allocateDescriptorSetsUnique( - vk::DescriptorSetAllocateInfo( descriptorPool.get(), descriptorSetLayouts ) ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayouts ); + std::vector descriptorSets = device.allocateDescriptorSets( descriptorSetAllocateInfo ); // Populate with info about our 
uniform buffer - vk::DescriptorBufferInfo uniformBufferInfo( uniformBufferData.buffer.get(), 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo textureImageInfo( textureData.textureSampler.get(), - textureData.imageData->imageView.get(), - vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorBufferInfo uniformBufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo textureImageInfo( + textureData.sampler, textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); std::array writeDescriptorSets = { - { vk::WriteDescriptorSet( - descriptorSets[0].get(), 0, 0, vk::DescriptorType::eUniformBuffer, {}, uniformBufferInfo ), - vk::WriteDescriptorSet( - descriptorSets[1].get(), 0, 0, vk::DescriptorType::eCombinedImageSampler, textureImageInfo ) } + { vk::WriteDescriptorSet( descriptorSets[0], 0, 0, vk::DescriptorType::eUniformBuffer, {}, uniformBufferInfo ), + vk::WriteDescriptorSet( descriptorSets[1], 0, 0, vk::DescriptorType::eCombinedImageSampler, textureImageInfo ) } }; - device->updateDescriptorSets( writeDescriptorSets, nullptr ); + device.updateDescriptorSets( writeDescriptorSets, nullptr ); /* VULKAN_KEY_END */ - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -217,52 +214,49 @@ int main( int /*argc*/, char ** /*argv*/ ) renderPass ); // Get the index of the next available swapchain image: 
- vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( vk::PipelineBindPoint::eGraphics, - pipelineLayout.get(), - 0, - { descriptorSets[0].get(), descriptorSets[1].get() }, - nullptr ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSets[0], descriptorSets[1] }, nullptr ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - 
commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -273,7 +267,35 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( 
pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSets ); + device.destroyDescriptorPool( descriptorPool ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( samplerLayout ); + device.destroyDescriptorSetLayout( uniformLayout ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + uniformBufferData.clear( device ); + textureData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/OcclusionQuery/OcclusionQuery.cpp b/samples/OcclusionQuery/OcclusionQuery.cpp index 2eb7036..24c59fa 100644 --- a/samples/OcclusionQuery/OcclusionQuery.cpp +++ b/samples/OcclusionQuery/OcclusionQuery.cpp @@ -32,36 +32,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( 
instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -69,27 +69,27 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, 
vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -99,21 +99,20 @@ int main( int /*argc*/, char ** /*argv*/ ) coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); 
- vk::UniqueDescriptorPool descriptorPool = + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} } }, {} ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = vk::su::createGraphicsPipeline( + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( coloredCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -123,93 +122,91 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ - vk::UniqueSemaphore imageAcquiredSemaphore = - device->createSemaphoreUnique( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); + vk::Semaphore imageAcquiredSemaphore = + device.createSemaphore( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); // Get the index of the next available swapchain image: vk::ResultValue currentBuffer = - device->acquireNextImageKHR( swapChainData.swapChain.get(), UINT64_MAX, imageAcquiredSemaphore.get(), nullptr ); + device.acquireNextImageKHR( 
swapChainData.swapChain, UINT64_MAX, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); /* Allocate a uniform buffer that will take query results. */ - vk::UniqueBuffer queryResultBuffer = device->createBufferUnique( + vk::Buffer queryResultBuffer = device.createBuffer( vk::BufferCreateInfo( vk::BufferCreateFlags(), 4 * sizeof( uint64_t ), vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eTransferDst ) ); - vk::MemoryRequirements memoryRequirements = device->getBufferMemoryRequirements( queryResultBuffer.get() ); + vk::MemoryRequirements memoryRequirements = device.getBufferMemoryRequirements( queryResultBuffer ); uint32_t memoryTypeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); - vk::UniqueDeviceMemory queryResultMemory = - device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + vk::DeviceMemory queryResultMemory = + device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); - device->bindBufferMemory( queryResultBuffer.get(), queryResultMemory.get(), 0 ); + device.bindBufferMemory( queryResultBuffer, queryResultMemory, 0 ); - vk::UniqueQueryPool queryPool = device->createQueryPoolUnique( vk::QueryPoolCreateInfo( + vk::QueryPool queryPool = device.createQueryPool( vk::QueryPoolCreateInfo( vk::QueryPoolCreateFlags(), vk::QueryType::eOcclusion, 2, vk::QueryPipelineStatisticFlags() ) ); - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); - commandBuffer->resetQueryPool( queryPool.get(), 0, 2 ); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlags() ) ); + commandBuffer.resetQueryPool( queryPool, 0, 2 ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( 
std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - commandBuffer->beginRenderPass( vk::RenderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), - vk::Rect2D( vk::Offset2D(), surfaceData.extent ), - clearValues ), - vk::SubpassContents::eInline ); + commandBuffer.beginRenderPass( + vk::RenderPassBeginInfo( + renderPass, framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), {} ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, {} ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->beginQuery( queryPool.get(), 0, vk::QueryControlFlags() ); - commandBuffer->endQuery( queryPool.get(), 0 ); + commandBuffer.beginQuery( queryPool, 0, vk::QueryControlFlags() ); + commandBuffer.endQuery( queryPool, 0 ); - commandBuffer->beginQuery( queryPool.get(), 1, vk::QueryControlFlags() ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - 
commandBuffer->endRenderPass(); - commandBuffer->endQuery( queryPool.get(), 1 ); + commandBuffer.beginQuery( queryPool, 1, vk::QueryControlFlags() ); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.endQuery( queryPool, 1 ); - commandBuffer->copyQueryPoolResults( queryPool.get(), - 0, - 2, - queryResultBuffer.get(), - 0, - sizeof( uint64_t ), - vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); - commandBuffer->end(); + commandBuffer.copyQueryPoolResults( queryPool, + 0, + 2, + queryResultBuffer, + 0, + sizeof( uint64_t ), + vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); graphicsQueue.waitIdle(); vk::ResultValue> rv = - device->getQueryPoolResults( *queryPool, - 0, - 2, - 2 * sizeof( uint64_t ), - sizeof( uint64_t ), - vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); - switch ( rv.result ) + device.getQueryPoolResults( queryPool, + 0, + 2, + 2 * sizeof( uint64_t ), + sizeof( uint64_t ), + vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); + switch ( rv.result ) { case vk::Result::eSuccess: break; case vk::Result::eNotReady: @@ -224,18 +221,19 @@ int main( int /*argc*/, char ** /*argv*/ ) /* Read back query result from buffer */ uint64_t * samplesPassedPtr = static_cast( - device->mapMemory( queryResultMemory.get(), 0, memoryRequirements.size, vk::MemoryMapFlags() ) ); + device.mapMemory( 
queryResultMemory, 0, memoryRequirements.size, vk::MemoryMapFlags() ) ); std::cout << "vkCmdCopyQueryPoolResults data\n"; std::cout << "samples_passed[0] = " << samplesPassedPtr[0] << "\n"; std::cout << "samples_passed[1] = " << samplesPassedPtr[1] << "\n"; - device->unmapMemory( queryResultMemory.get() ); + device.unmapMemory( queryResultMemory ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; - vk::Result result = presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + vk::Result result = + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -246,7 +244,37 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + device.destroyFence( drawFence ); + device.destroyQueryPool( queryPool ); + device.freeMemory( queryResultMemory ); + device.destroyBuffer( queryResultBuffer ); + device.destroySemaphore( imageAcquiredSemaphore ); + /* VULKAN_KEY_END */ + + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + 
device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp b/samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp index 50a6eed..e555326 100644 --- a/samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp +++ b/samples/PhysicalDeviceExtensions/PhysicalDeviceExtensions.cpp @@ -27,21 +27,22 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = instance.enumeratePhysicalDevices(); /* VULKAN_KEY_START */ for ( size_t i = 0; i < physicalDevices.size(); i++ ) { - std::cout << "PhysicalDevice " << i << "\n"; std::vector extensionProperties = physicalDevices[i].enumerateDeviceExtensionProperties(); + std::cout << "PhysicalDevice " << i << " : " << extensionProperties.size() << " extensions:\n"; // sort the extensions alphabetically std::sort( extensionProperties.begin(), @@ -58,6 +59,9 @@ int main( int /*argc*/, char ** /*argv*/ ) } /* VULKAN_KEY_END */ + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp b/samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp index 0e68504..b6aa06e 100644 --- 
a/samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp +++ b/samples/PhysicalDeviceFeatures/PhysicalDeviceFeatures.cpp @@ -36,13 +36,14 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = instance.enumeratePhysicalDevices(); /* VULKAN_KEY_START */ @@ -53,7 +54,7 @@ int main( int /*argc*/, char ** /*argv*/ ) std::vector extensionProperties = physicalDevices[i].enumerateDeviceExtensionProperties(); - std::cout << "PhysicalDevice " << i << "\n"; + std::cout << "PhysicalDevice " << i << " :\n"; auto features2 = physicalDevices[i] .getFeatures2 groupProperties = instance->enumeratePhysicalDeviceGroups(); + std::vector groupProperties = instance.enumeratePhysicalDeviceGroups(); std::cout << std::boolalpha; for ( size_t i = 0; i < groupProperties.size(); i++ ) @@ -60,15 +61,14 @@ int main( int /*argc*/, char ** /*argv*/ ) std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); // get the first index into queueFamiliyProperties which supports graphics - size_t graphicsQueueFamilyIndex = std::distance( - queueFamilyProperties.begin(), - std::find_if( - queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { - return qfp.queueFlags & vk::QueueFlagBits::eGraphics; - } ) ); + auto propertyIterator = std::find_if( + queueFamilyProperties.begin(), queueFamilyProperties.end(), []( 
vk::QueueFamilyProperties const & qfp ) { + return qfp.queueFlags & vk::QueueFlagBits::eGraphics; + } ); + size_t graphicsQueueFamilyIndex = std::distance( queueFamilyProperties.begin(), propertyIterator ); assert( graphicsQueueFamilyIndex < queueFamilyProperties.size() ); - // create a UniqueDevice + // create a Device float queuePriority = 0.0f; vk::DeviceQueueCreateInfo deviceQueueCreateInfo( vk::DeviceQueueCreateFlags(), static_cast( graphicsQueueFamilyIndex ), 1, &queuePriority ); @@ -78,11 +78,17 @@ int main( int /*argc*/, char ** /*argv*/ ) groupProperties[i].physicalDevices ); deviceCreateInfo.pNext = &deviceGroupDeviceCreateInfo; - vk::UniqueDevice device = physicalDevice.createDeviceUnique( deviceCreateInfo ); + vk::Device device = physicalDevice.createDevice( deviceCreateInfo ); + + // ... and destroy it again + device.destroy(); } } /* VULKAN_KEY_END */ + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp b/samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp index e8ed92c..09462ff 100644 --- a/samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp +++ b/samples/PhysicalDeviceProperties/PhysicalDeviceProperties.cpp @@ -101,13 +101,14 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = 
instance.enumeratePhysicalDevices(); /* VULKAN_KEY_START */ @@ -1266,6 +1267,9 @@ int main( int /*argc*/, char ** /*argv*/ ) } /* VULKAN_KEY_END */ + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp b/samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp index a597937..cedcfd5 100644 --- a/samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp +++ b/samples/PhysicalDeviceQueueFamilyProperties/PhysicalDeviceQueueFamilyProperties.cpp @@ -29,13 +29,14 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, {}, VK_API_VERSION_1_1 ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = instance.enumeratePhysicalDevices(); /* VULKAN_KEY_START */ @@ -87,6 +88,9 @@ int main( int /*argc*/, char ** /*argv*/ ) } /* VULKAN_KEY_END */ + + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PipelineCache/PipelineCache.cpp b/samples/PipelineCache/PipelineCache.cpp index 28faeb0..1bcb86c 100644 --- a/samples/PipelineCache/PipelineCache.cpp +++ b/samples/PipelineCache/PipelineCache.cpp @@ -71,71 +71,71 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, 
vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::PhysicalDeviceProperties properties = physicalDevice.getProperties(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 
); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent ); vk::su::TextureData textureData( physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass 
renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -145,11 +145,10 @@ int main( int /*argc*/, char ** /*argv*/ ) texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); - vk::UniqueDescriptorPool descriptorPool = vk::su::createDescriptorPool( + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} } }, textureData ); @@ -287,7 +286,7 @@ int main( int /*argc*/, char ** /*argv*/ ) } // Feed the initial cache data into cache creation - 
vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo( vk::PipelineCacheCreateFlags(), startCacheSize, startCacheData ) ); // Free our initialData now that pipeline cache has been created @@ -295,12 +294,12 @@ int main( int /*argc*/, char ** /*argv*/ ) startCacheData = NULL; // Time (roughly) taken to create the graphics pipeline - timestamp_t start = getMilliseconds(); - vk::UniquePipeline graphicsPipeline = + timestamp_t start = getMilliseconds(); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -310,12 +309,12 @@ int main( int /*argc*/, char ** /*argv*/ ) timestamp_t elapsed = getMilliseconds() - start; std::cout << " vkCreateGraphicsPipeline time: " << (double)elapsed << " ms\n"; - vk::UniqueSemaphore imageAcquiredSemaphore = - device->createSemaphoreUnique( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); + vk::Semaphore imageAcquiredSemaphore = + device.createSemaphore( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); // Get the index of the next available swapchain image: vk::ResultValue currentBuffer = - device->acquireNextImageKHR( swapChainData.swapChain.get(), UINT64_MAX, imageAcquiredSemaphore.get(), nullptr ); + device.acquireNextImageKHR( swapChainData.swapChain, UINT64_MAX, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); @@ -323,40 +322,38 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 
0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - commandBuffer->beginRenderPass( vk::RenderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), - vk::Rect2D( vk::Offset2D(), surfaceData.extent ), - clearValues ), - vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), {} ); + commandBuffer.beginRenderPass( + vk::RenderPassBeginInfo( + renderPass, framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, {} ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( 
vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -370,7 +367,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Store away the cache that we've populated. This could conceivably happen // earlier, depends on when the pipeline cache stops being populated // internally. 
- std::vector endCacheData = device->getPipelineCacheData( pipelineCache.get() ); + std::vector endCacheData = device.getPipelineCacheData( pipelineCache ); // Write the file to disk, overwriting whatever was there std::ofstream writeCacheStream( cacheFileName, std::ios_base::out | std::ios_base::binary ); @@ -387,6 +384,33 @@ int main( int /*argc*/, char ** /*argv*/ ) } /* VULKAN_KEY_END */ + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + textureData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PipelineDerivative/PipelineDerivative.cpp b/samples/PipelineDerivative/PipelineDerivative.cpp index c4788f2..c47fd3c 100644 --- a/samples/PipelineDerivative/PipelineDerivative.cpp +++ b/samples/PipelineDerivative/PipelineDerivative.cpp @@ -23,6 +23,7 @@ #else // unknow compiler... 
just ignore the warnings for yourselves ;) #endif + #include "../utils/geometries.hpp" #include "../utils/math.hpp" #include "../utils/shaders.hpp" @@ -39,70 +40,70 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( 
graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent ); vk::su::TextureData textureData( physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() ); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout 
pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -112,16 +113,15 @@ int main( int /*argc*/, char ** /*argv*/ ) texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); - vk::UniqueDescriptorPool descriptorPool = vk::su::createDescriptorPool( + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { 
vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} } }, textureData ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); /* VULKAN_KEY_START */ @@ -132,9 +132,9 @@ int main( int /*argc*/, char ** /*argv*/ ) std::array pipelineShaderStageCreateInfos = { vk::PipelineShaderStageCreateInfo( - vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, vertexShaderModule.get(), "main" ), + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, vertexShaderModule, "main" ), vk::PipelineShaderStageCreateInfo( - vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule.get(), "main" ) + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule, "main" ) }; vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( texturedCubeData[0] ) ); @@ -209,12 +209,12 @@ int main( int /*argc*/, char ** /*argv*/ ) &pipelineDepthStencilStateCreateInfo, &pipelineColorBlendStateCreateInfo, &pipelineDynamicStateCreateInfo, - pipelineLayout.get(), - renderPass.get() ); + pipelineLayout, + renderPass ); - vk::UniquePipeline basePipeline; - vk::ResultValue rvPipeline = - device->createGraphicsPipelineUnique( pipelineCache.get(), graphicsPipelineCreateInfo ); + vk::Pipeline basePipeline; + vk::ResultValue rvPipeline = + device.createGraphicsPipeline( pipelineCache, graphicsPipelineCreateInfo ); switch ( rvPipeline.result ) { case vk::Result::eSuccess: basePipeline = std::move( rvPipeline.value ); break; @@ -241,20 +241,20 @@ void main() // Convert GLSL to SPIR-V glslang::InitializeProcess(); - vk::UniqueShaderModule fragmentShaderModule2 = + vk::ShaderModule fragmentShaderModule2 = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C_2 ); glslang::FinalizeProcess(); 
// Modify pipeline info to reflect derivation pipelineShaderStageCreateInfos[1] = vk::PipelineShaderStageCreateInfo( - vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule2.get(), "main" ); + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule2, "main" ); graphicsPipelineCreateInfo.flags = vk::PipelineCreateFlagBits::eDerivative; - graphicsPipelineCreateInfo.basePipelineHandle = basePipeline.get(); + graphicsPipelineCreateInfo.basePipelineHandle = basePipeline; graphicsPipelineCreateInfo.basePipelineIndex = -1; // And create the derived pipeline - vk::UniquePipeline derivedPipeline; - rvPipeline = device->createGraphicsPipelineUnique( *pipelineCache, graphicsPipelineCreateInfo ); + vk::Pipeline derivedPipeline; + rvPipeline = device.createGraphicsPipeline( pipelineCache, graphicsPipelineCreateInfo ); switch ( rvPipeline.result ) { case vk::Result::eSuccess: derivedPipeline = std::move( rvPipeline.value ); break; @@ -266,12 +266,12 @@ void main() /* VULKAN_KEY_END */ - vk::UniqueSemaphore imageAcquiredSemaphore = - device->createSemaphoreUnique( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); + vk::Semaphore imageAcquiredSemaphore = + device.createSemaphore( vk::SemaphoreCreateInfo( vk::SemaphoreCreateFlags() ) ); // Get the index of the next available swapchain image vk::ResultValue currentBuffer = - device->acquireNextImageKHR( swapChainData.swapChain.get(), UINT64_MAX, imageAcquiredSemaphore.get(), nullptr ); + device.acquireNextImageKHR( swapChainData.swapChain, UINT64_MAX, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); @@ -279,40 +279,38 @@ void main() clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - commandBuffer->beginRenderPass( vk::RenderPassBeginInfo( 
renderPass.get(), - framebuffers[currentBuffer.value].get(), - vk::Rect2D( vk::Offset2D(), surfaceData.extent ), - clearValues ), - vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, derivedPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), {} ); + commandBuffer.beginRenderPass( + vk::RenderPassBeginInfo( + renderPass, framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, derivedPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, {} ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - 
graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -322,6 +320,35 @@ void main() default: assert( false ); // an unexpected result is returned ! } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyShaderModule( fragmentShaderModule2 ); + device.destroyPipeline( derivedPipeline ); + device.destroyPipeline( basePipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + textureData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( 
debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PushConstants/PushConstants.cpp b/samples/PushConstants/PushConstants.cpp index 4bb57ac..11c71d7 100644 --- a/samples/PushConstants/PushConstants.cpp +++ b/samples/PushConstants/PushConstants.cpp @@ -81,36 +81,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + 
device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -118,22 +118,22 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = 
vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -145,32 +145,32 @@ int main( int /*argc*/, char ** /*argv*/ ) // Create binding and layout for the following, matching contents of shader // binding 0 = uniform buffer (MVP) - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); /* VULKAN_KEY_START */ // Set up our push constant range, which mirrors the declaration of - vk::PushConstantRange pushConstantRanges( vk::ShaderStageFlagBits::eFragment, 0, 8 ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout, pushConstantRanges ) ); + vk::PushConstantRange pushConstantRanges( vk::ShaderStageFlagBits::eFragment, 0, 8 ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout, pushConstantRanges ) ); // Create a single pool to contain data for our descriptor set std::array poolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 1 ), vk::DescriptorPoolSize( vk::DescriptorType::eCombinedImageSampler, 1 ) }; - vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( + vk::DescriptorPool descriptorPool = device.createDescriptorPool( vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSizes ) ); // Populate descriptor sets - vk::UniqueDescriptorSet descriptorSet = std::move( 
- device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); // Populate with info about our uniform buffer for MVP - vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer.get(), 0, sizeof( glm::mat4x4 ) ); - device->updateDescriptorSets( - vk::WriteDescriptorSet( *descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), {} ); + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::WriteDescriptorSet writeDescriptorSet( + descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ); + device.updateDescriptorSets( writeDescriptorSet, nullptr ); // Create our push constant data, which matches shader expectations std::array pushConstants = { { (unsigned)2, (unsigned)0x3F800000 } }; @@ -178,18 +178,17 @@ int main( int /*argc*/, char ** /*argv*/ ) // Ensure we have enough room for push constant data assert( ( sizeof( pushConstants ) <= physicalDevice.getProperties().limits.maxPushConstantsSize ) && "Too many push constants" ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); - commandBuffer->pushConstants( - pipelineLayout.get(), vk::ShaderStageFlagBits::eFragment, 0, pushConstants ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); + commandBuffer.pushConstants( pipelineLayout, vk::ShaderStageFlagBits::eFragment, 0, pushConstants ); /* VULKAN_KEY_END */ - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = vk::su::createGraphicsPipeline( + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, 
pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -197,9 +196,9 @@ int main( int /*argc*/, char ** /*argv*/ ) pipelineLayout, renderPass ); - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); @@ -207,40 +206,39 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( 
vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, 
currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -250,6 +248,32 @@ int main( int /*argc*/, char ** /*argv*/ ) default: assert( false ); // an unexpected result is returned ! } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/PushDescriptors/PushDescriptors.cpp b/samples/PushDescriptors/PushDescriptors.cpp index 756651f..5d4432c 100644 --- a/samples/PushDescriptors/PushDescriptors.cpp +++ b/samples/PushDescriptors/PushDescriptors.cpp @@ -44,9 +44,11 @@ int main( int /*argc*/, char ** /*argv*/ ) // To use PUSH_DESCRIPTOR, you must also specify GET_PHYSICAL_DEVICE_PROPERTIES_2 std::vector extensionProperties = vk::enumerateInstanceExtensionProperties(); - if ( std::find_if( extensionProperties.begin(), extensionProperties.end(), []( vk::ExtensionProperties ep ) { - return ( strcmp( ep.extensionName, 
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ) == 0 ); - } ) == extensionProperties.end() ) + auto propertyIterator = + std::find_if( extensionProperties.begin(), extensionProperties.end(), []( vk::ExtensionProperties ep ) { + return ( strcmp( ep.extensionName, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ) == 0 ); + } ); + if ( propertyIterator == extensionProperties.end() ) { std::cout << "No GET_PHYSICAL_DEVICE_PROPERTIES_2 extension" << std::endl; return 0; @@ -55,18 +57,21 @@ int main( int /*argc*/, char ** /*argv*/ ) std::vector instanceExtensions = vk::su::getInstanceExtensions(); instanceExtensions.push_back( VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ); - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, instanceExtensions ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, instanceExtensions ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); // Once instance is created, need to make sure the extension is available extensionProperties = physicalDevice.enumerateDeviceExtensionProperties(); - if ( std::find_if( extensionProperties.begin(), extensionProperties.end(), []( vk::ExtensionProperties ep ) { - return ( strcmp( ep.extensionName, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME ) == 0 ); - } ) == extensionProperties.end() ) + propertyIterator = + std::find_if( extensionProperties.begin(), extensionProperties.end(), []( vk::ExtensionProperties ep ) { + return ( strcmp( ep.extensionName, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME ) == 0 ); + } ); + if ( propertyIterator == 
extensionProperties.end() ) { std::cout << "No extension for push descriptors" << std::endl; return 0; @@ -78,62 +83,61 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, deviceExtensions ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::su::DepthBufferData depthBufferData( physicalDevice, device, 
vk::Format::eD16Unorm, surfaceData.extent ); vk::su::TextureData textureData( physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() ); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); // Need to specify that descriptor set layout will be for push descriptors - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } }, vk::DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule 
vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -143,12 +147,12 @@ int main( int /*argc*/, char ** /*argv*/ ) texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -157,60 +161,59 @@ int main( int /*argc*/, char ** /*argv*/ ) renderPass ); // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, 
vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); - vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer.get(), 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo imageInfo( textureData.textureSampler.get(), - textureData.imageData->imageView.get(), - vk::ImageLayout::eShaderReadOnlyOptimal ); - vk::WriteDescriptorSet writeDescriptorSets[2] = { + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( + textureData.sampler, textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::WriteDescriptorSet writeDescriptorSets[2] = { vk::WriteDescriptorSet( {}, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), vk::WriteDescriptorSet( {}, 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) }; // this call is from an extension and needs the dynamic dispatcher !! 
- commandBuffer->pushDescriptorSetKHR( - vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { 2, writeDescriptorSets } ); + commandBuffer.pushDescriptorSetKHR( + vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { 2, writeDescriptorSets } ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + 
presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -223,7 +226,32 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_END */ - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + textureData.clear( device ); + uniformBufferData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/RayTracing/RayTracing.cpp b/samples/RayTracing/RayTracing.cpp index 12f2746..8408033 100644 --- a/samples/RayTracing/RayTracing.cpp +++ b/samples/RayTracing/RayTracing.cpp @@ -81,16 +81,33 @@ static_assert( sizeof( GeometryInstanceData ) == 64, "GeometryInstanceData struc struct AccelerationStructureData { - vk::UniqueAccelerationStructureNV acclerationStructure; + void clear( vk::Device device ) + { + device.destroyAccelerationStructureNV( accelerationStructure ); + if ( scratchBufferData ) + { + scratchBufferData->clear( device ); + } + if ( resultBufferData ) + { + resultBufferData->clear( device ); + } + if ( instanceBufferData ) + { + instanceBufferData->clear( device ); + } 
+ } + + vk::AccelerationStructureNV accelerationStructure; std::unique_ptr scratchBufferData; std::unique_ptr resultBufferData; std::unique_ptr instanceBufferData; }; AccelerationStructureData - createAccelerationStructureData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, - vk::UniqueCommandBuffer const & commandBuffer, + createAccelerationStructureData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::CommandBuffer const & commandBuffer, std::vector> const & instances, std::vector const & geometries ) { @@ -102,13 +119,13 @@ AccelerationStructureData instances.empty() ? vk::AccelerationStructureTypeNV::eBottomLevel : vk::AccelerationStructureTypeNV::eTopLevel; vk::AccelerationStructureInfoNV accelerationStructureInfo( accelerationStructureType, {}, vk::su::checked_cast( instances.size() ), geometries ); - accelerationStructureData.acclerationStructure = device->createAccelerationStructureNVUnique( - vk::AccelerationStructureCreateInfoNV( 0, accelerationStructureInfo ) ); + accelerationStructureData.accelerationStructure = + device.createAccelerationStructureNV( vk::AccelerationStructureCreateInfoNV( 0, accelerationStructureInfo ) ); vk::AccelerationStructureMemoryRequirementsInfoNV objectRequirements( - vk::AccelerationStructureMemoryRequirementsTypeNV::eObject, *accelerationStructureData.acclerationStructure ); + vk::AccelerationStructureMemoryRequirementsTypeNV::eObject, accelerationStructureData.accelerationStructure ); vk::DeviceSize resultSizeInBytes = - device->getAccelerationStructureMemoryRequirementsNV( objectRequirements ).memoryRequirements.size; + device.getAccelerationStructureMemoryRequirementsNV( objectRequirements ).memoryRequirements.size; assert( 0 < resultSizeInBytes ); accelerationStructureData.resultBufferData = std::unique_ptr( new vk::su::BufferData( physicalDevice, @@ -118,13 +135,13 @@ AccelerationStructureData vk::MemoryPropertyFlagBits::eDeviceLocal ) ); 
vk::AccelerationStructureMemoryRequirementsInfoNV buildScratchRequirements( - vk::AccelerationStructureMemoryRequirementsTypeNV::eBuildScratch, *accelerationStructureData.acclerationStructure ); + vk::AccelerationStructureMemoryRequirementsTypeNV::eBuildScratch, accelerationStructureData.accelerationStructure ); vk::AccelerationStructureMemoryRequirementsInfoNV updateScratchRequirements( vk::AccelerationStructureMemoryRequirementsTypeNV::eUpdateScratch, - *accelerationStructureData.acclerationStructure ); + accelerationStructureData.accelerationStructure ); vk::DeviceSize scratchSizeInBytes = std::max( - device->getAccelerationStructureMemoryRequirementsNV( buildScratchRequirements ).memoryRequirements.size, - device->getAccelerationStructureMemoryRequirementsNV( updateScratchRequirements ).memoryRequirements.size ); + device.getAccelerationStructureMemoryRequirementsNV( buildScratchRequirements ).memoryRequirements.size, + device.getAccelerationStructureMemoryRequirementsNV( updateScratchRequirements ).memoryRequirements.size ); assert( 0 < scratchSizeInBytes ); accelerationStructureData.scratchBufferData = @@ -145,14 +162,14 @@ AccelerationStructureData std::vector geometryInstanceData; for ( size_t i = 0; i < instances.size(); i++ ) { - uint64_t accelerationStructureHandle = device->getAccelerationStructureHandleNV( instances[i].first ); + uint64_t accelerationStructureHandle = device.getAccelerationStructureHandleNV( instances[i].first ); // For each instance we set its instance index to its index i in the instance vector, and set // its hit group index to 2*i. The hit group index defines which entry of the shader binding // table will contain the hit group to be executed when hitting this instance. We set this // index to 2*i due to the use of 2 types of rays in the scene: the camera rays and the shadow // rays. 
For each instance, the SBT will then have 2 hit groups - geometryInstanceData.push_back( + geometryInstanceData.emplace_back( GeometryInstanceData( glm::transpose( instances[i].second ), static_cast( i ), 0xFF, @@ -163,40 +180,49 @@ AccelerationStructureData accelerationStructureData.instanceBufferData->upload( device, geometryInstanceData ); } - device->bindAccelerationStructureMemoryNV( vk::BindAccelerationStructureMemoryInfoNV( - *accelerationStructureData.acclerationStructure, *accelerationStructureData.resultBufferData->deviceMemory ) ); + device.bindAccelerationStructureMemoryNV( vk::BindAccelerationStructureMemoryInfoNV( + accelerationStructureData.accelerationStructure, accelerationStructureData.resultBufferData->deviceMemory ) ); - commandBuffer->buildAccelerationStructureNV( + commandBuffer.buildAccelerationStructureNV( vk::AccelerationStructureInfoNV( accelerationStructureType, {}, vk::su::checked_cast( instances.size() ), geometries ), - accelerationStructureData.instanceBufferData ? *accelerationStructureData.instanceBufferData->buffer : nullptr, + accelerationStructureData.instanceBufferData ? 
accelerationStructureData.instanceBufferData->buffer : nullptr, 0, false, - *accelerationStructureData.acclerationStructure, + accelerationStructureData.accelerationStructure, nullptr, - *accelerationStructureData.scratchBufferData->buffer, + accelerationStructureData.scratchBufferData->buffer, 0 ); - commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eAccelerationStructureBuildNV, - vk::PipelineStageFlagBits::eAccelerationStructureBuildNV, - {}, - vk::MemoryBarrier( vk::AccessFlagBits::eAccelerationStructureWriteNV | - vk::AccessFlagBits::eAccelerationStructureReadNV, - vk::AccessFlagBits::eAccelerationStructureWriteNV | - vk::AccessFlagBits::eAccelerationStructureReadNV ), - {}, - {} ); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eAccelerationStructureBuildNV, + vk::PipelineStageFlagBits::eAccelerationStructureBuildNV, + {}, + vk::MemoryBarrier( vk::AccessFlagBits::eAccelerationStructureWriteNV | + vk::AccessFlagBits::eAccelerationStructureReadNV, + vk::AccessFlagBits::eAccelerationStructureWriteNV | + vk::AccessFlagBits::eAccelerationStructureReadNV ), + {}, + {} ); return accelerationStructureData; } struct PerFrameData { - vk::UniqueCommandPool commandPool; - vk::UniqueCommandBuffer commandBuffer; - vk::UniqueFence fence; - vk::UniqueSemaphore presentCompleteSemaphore; - vk::UniqueSemaphore renderCompleteSemaphore; + void clear( vk::Device device ) + { + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroyFence( fence ); + device.destroySemaphore( presentCompleteSemaphore ); + device.destroySemaphore( renderCompleteSemaphore ); + } + + vk::CommandPool commandPool; + vk::CommandBuffer commandBuffer; + vk::Fence fence; + vk::Semaphore presentCompleteSemaphore; + vk::Semaphore renderCompleteSemaphore; }; struct UniformBufferObject @@ -709,17 +735,18 @@ int main( int /*argc*/, char ** /*argv*/ ) } instanceExtensions.push_back( 
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ); - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, instanceExtensions ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, instanceExtensions ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); // Create Window Surface (using glfw) vk::SurfaceKHR surface; - VkResult err = - glfwCreateWindowSurface( VkInstance( *instance ), window, nullptr, reinterpret_cast( &surface ) ); + VkResult err = glfwCreateWindowSurface( + static_cast( instance ), window, nullptr, reinterpret_cast( &surface ) ); check_vk_result( err ); std::pair graphicsAndPresentQueueFamilyIndex = @@ -728,7 +755,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Create a Device with ray tracing support (besides some other extensions needed) and needed features auto supportedFeatures = physicalDevice.getFeatures2(); - vk::UniqueDevice device = + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, { VK_KHR_SWAPCHAIN_EXTENSION_NAME, @@ -741,20 +768,19 @@ int main( int /*argc*/, char ** /*argv*/ ) std::array perFrameData; for ( int i = 0; i < IMGUI_VK_QUEUED_FRAMES; i++ ) { - perFrameData[i].commandPool = device->createCommandPoolUnique( vk::CommandPoolCreateInfo( + perFrameData[i].commandPool = device.createCommandPool( vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlagBits::eResetCommandBuffer, graphicsAndPresentQueueFamilyIndex.first ) ); - perFrameData[i].commandBuffer = - std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - 
*perFrameData[i].commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); - perFrameData[i].fence = device->createFenceUnique( vk::FenceCreateInfo( vk::FenceCreateFlagBits::eSignaled ) ); - perFrameData[i].presentCompleteSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - perFrameData[i].renderCompleteSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); + perFrameData[i].commandBuffer = device + .allocateCommandBuffers( vk::CommandBufferAllocateInfo( + perFrameData[i].commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); + perFrameData[i].fence = device.createFence( vk::FenceCreateInfo( vk::FenceCreateFlagBits::eSignaled ) ); + perFrameData[i].presentCompleteSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + perFrameData[i].renderCompleteSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); } - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); // create a descriptor pool with a number of available descriptors std::vector poolSizes = { @@ -762,7 +788,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eUniformBuffer, 1000 }, { vk::DescriptorType::eStorageBuffer, 1000 }, }; - vk::UniqueDescriptorPool descriptorPool = vk::su::createDescriptorPool( device, poolSizes ); + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, poolSizes ); // setup swap chain, render pass, depth buffer and the frame buffers vk::su::SwapChainData swapChainData( physicalDevice, @@ -770,17 +796,17 @@ int main( int /*argc*/, char ** /*argv*/ ) surface, windowExtent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eStorage, - 
vk::UniqueSwapchainKHR(), + nullptr, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surface ) ); vk::Format depthFormat = vk::su::pickDepthFormat( physicalDevice ); // setup a render pass - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( device, surfaceFormat.format, depthFormat ); + vk::RenderPass renderPass = vk::su::createRenderPass( device, surfaceFormat.format, depthFormat ); - vk::su::DepthBufferData depthBufferData( physicalDevice, device, depthFormat, windowExtent ); - std::vector framebuffers = vk::su::createFramebuffers( + vk::su::DepthBufferData depthBufferData( physicalDevice, device, depthFormat, windowExtent ); + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, windowExtent ); bool samplerAnisotropy = !!supportedFeatures.get().features.samplerAnisotropy; @@ -791,16 +817,16 @@ int main( int /*argc*/, char ** /*argv*/ ) textures.reserve( textureCount ); for ( size_t i = 0; i < textureCount; i++ ) { - textures.push_back( vk::su::TextureData( physicalDevice, - device, - { random( 2, 8 ) * 16, random( 2, 8 ) * 16 }, - vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, - {}, - samplerAnisotropy, - true ) ); + textures.emplace_back( physicalDevice, + device, + vk::Extent2D( random( 2, 8 ) * 16, random( 2, 8 ) * 16 ), + vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, + vk::FormatFeatureFlags(), + samplerAnisotropy, + true ); } vk::su::oneTimeSubmit( - device, perFrameData[0].commandPool, graphicsQueue, [&]( vk::UniqueCommandBuffer const & commandBuffer ) { + device, perFrameData[0].commandPool, graphicsQueue, [&]( vk::CommandBuffer const & commandBuffer ) { for ( auto & t : textures ) { t.setImage( @@ -873,7 +899,7 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 transform( 
glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f ) ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eStorageBuffer, @@ -882,21 +908,21 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eCombinedImageSampler, static_cast( textures.size() ), vk::ShaderStageFlagBits::eFragment } } ); - vk::UniquePipelineLayout pipelineLayout = - device->createPipelineLayoutUnique( vk::PipelineLayoutCreateInfo( {}, *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = + device.createPipelineLayout( vk::PipelineLayoutCreateInfo( {}, descriptorSetLayout ) ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText ); glslang::FinalizeProcess(); - vk::UniquePipeline graphicsPipeline = vk::su::createGraphicsPipeline( + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, - vk::UniquePipelineCache(), - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + {}, + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), VertexStride, { { vk::Format::eR32G32B32Sfloat, vk::su::checked_cast( offsetof( Vertex, pos ) ) }, { vk::Format::eR32G32B32Sfloat, vk::su::checked_cast( offsetof( Vertex, nrm ) ) }, @@ -910,9 +936,8 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( UniformBufferObject ), 
vk::BufferUsageFlagBits::eUniformBuffer ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} }, @@ -924,13 +949,13 @@ int main( int /*argc*/, char ** /*argv*/ ) // create acceleration structures: one top-level, and just one bottom-level AccelerationStructureData topLevelAS, bottomLevelAS; vk::su::oneTimeSubmit( - device, perFrameData[0].commandPool, graphicsQueue, [&]( vk::UniqueCommandBuffer const & commandBuffer ) { - vk::GeometryDataNV geometryDataNV( vk::GeometryTrianglesNV( *vertexBufferData.buffer, + device, perFrameData[0].commandPool, graphicsQueue, [&]( vk::CommandBuffer const & commandBuffer ) { + vk::GeometryDataNV geometryDataNV( vk::GeometryTrianglesNV( vertexBufferData.buffer, 0, vk::su::checked_cast( vertices.size() ), VertexStride, vk::Format::eR32G32B32Sfloat, - *indexBufferData.buffer, + indexBufferData.buffer, 0, vk::su::checked_cast( indices.size() ), vk::IndexType::eUint32 ), @@ -946,91 +971,89 @@ int main( int /*argc*/, char ** /*argv*/ ) createAccelerationStructureData( physicalDevice, device, commandBuffer, - { std::make_pair( *bottomLevelAS.acclerationStructure, transform ) }, + { std::make_pair( bottomLevelAS.accelerationStructure, transform ) }, std::vector() ); } ); // create raytracing descriptor set vk::su::oneTimeSubmit( - device, perFrameData[0].commandPool, graphicsQueue, [&]( vk::UniqueCommandBuffer const & commandBuffer ) { + device, perFrameData[0].commandPool, graphicsQueue, [&]( vk::CommandBuffer const & commandBuffer ) { vk::BufferMemoryBarrier bufferMemoryBarrier( {}, 
vk::AccessFlagBits::eShaderRead, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, - *vertexBufferData.buffer, + vertexBufferData.buffer, 0, VK_WHOLE_SIZE ); - commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eAllCommands, - vk::PipelineStageFlagBits::eAllCommands, - {}, - nullptr, - bufferMemoryBarrier, - nullptr ); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eAllCommands, + vk::PipelineStageFlagBits::eAllCommands, + {}, + nullptr, + bufferMemoryBarrier, + nullptr ); - bufferMemoryBarrier.buffer = *indexBufferData.buffer; - commandBuffer->pipelineBarrier( vk::PipelineStageFlagBits::eAllCommands, - vk::PipelineStageFlagBits::eAllCommands, - {}, - nullptr, - bufferMemoryBarrier, - nullptr ); + bufferMemoryBarrier.buffer = indexBufferData.buffer; + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eAllCommands, + vk::PipelineStageFlagBits::eAllCommands, + {}, + nullptr, + bufferMemoryBarrier, + nullptr ); } ); std::vector bindings; - bindings.push_back( - vk::DescriptorSetLayoutBinding( 0, - vk::DescriptorType::eAccelerationStructureNV, - 1, - vk::ShaderStageFlagBits::eRaygenNV | vk::ShaderStageFlagBits::eClosestHitNV ) ); - bindings.push_back( vk::DescriptorSetLayoutBinding( - 1, vk::DescriptorType::eStorageImage, 1, vk::ShaderStageFlagBits::eRaygenNV ) ); // raytracing output - bindings.push_back( vk::DescriptorSetLayoutBinding( - 2, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eRaygenNV ) ); // camera information - bindings.push_back( vk::DescriptorSetLayoutBinding( - 3, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ) ); // vertex buffer - bindings.push_back( vk::DescriptorSetLayoutBinding( - 4, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ) ); // index buffer - bindings.push_back( vk::DescriptorSetLayoutBinding( - 5, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ) ); // material buffer - bindings.push_back( 
vk::DescriptorSetLayoutBinding( 6, - vk::DescriptorType::eCombinedImageSampler, - vk::su::checked_cast( textures.size() ), - vk::ShaderStageFlagBits::eClosestHitNV ) ); // textures + bindings.emplace_back( 0, + vk::DescriptorType::eAccelerationStructureNV, + 1, + vk::ShaderStageFlagBits::eRaygenNV | vk::ShaderStageFlagBits::eClosestHitNV ); + bindings.emplace_back( + 1, vk::DescriptorType::eStorageImage, 1, vk::ShaderStageFlagBits::eRaygenNV ); // raytracing output + bindings.emplace_back( + 2, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eRaygenNV ); // camera information + bindings.emplace_back( + 3, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ); // vertex buffer + bindings.emplace_back( + 4, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ); // index buffer + bindings.emplace_back( + 5, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eClosestHitNV ); // material buffer + bindings.emplace_back( 6, + vk::DescriptorType::eCombinedImageSampler, + vk::su::checked_cast( textures.size() ), + vk::ShaderStageFlagBits::eClosestHitNV ); // textures std::vector descriptorPoolSizes; descriptorPoolSizes.reserve( bindings.size() ); for ( const auto & b : bindings ) { - descriptorPoolSizes.push_back( vk::DescriptorPoolSize( - b.descriptorType, vk::su::checked_cast( swapChainData.images.size() ) * b.descriptorCount ) ); + descriptorPoolSizes.emplace_back( + b.descriptorType, vk::su::checked_cast( swapChainData.images.size() ) * b.descriptorCount ); } vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, vk::su::checked_cast( swapChainData.images.size() ), descriptorPoolSizes ); - vk::UniqueDescriptorPool rayTracingDescriptorPool = device->createDescriptorPoolUnique( descriptorPoolCreateInfo ); - vk::UniqueDescriptorSetLayout rayTracingDescriptorSetLayout = - device->createDescriptorSetLayoutUnique( 
vk::DescriptorSetLayoutCreateInfo( {}, bindings ) ); + vk::DescriptorPool rayTracingDescriptorPool = device.createDescriptorPool( descriptorPoolCreateInfo ); + vk::DescriptorSetLayout rayTracingDescriptorSetLayout = + device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( {}, bindings ) ); std::vector layouts; for ( size_t i = 0; i < swapChainData.images.size(); i++ ) { - layouts.push_back( *rayTracingDescriptorSetLayout ); + layouts.push_back( rayTracingDescriptorSetLayout ); } - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *rayTracingDescriptorPool, layouts ); - std::vector rayTracingDescriptorSets = - device->allocateDescriptorSetsUnique( descriptorSetAllocateInfo ); + descriptorSetAllocateInfo = vk::DescriptorSetAllocateInfo( rayTracingDescriptorPool, layouts ); + std::vector rayTracingDescriptorSets = + device.allocateDescriptorSets( descriptorSetAllocateInfo ); // Bind ray tracing specific descriptor sets into pNext of a vk::WriteDescriptorSet vk::WriteDescriptorSetAccelerationStructureNV writeDescriptorSetAcceleration( 1, - &*topLevelAS.acclerationStructure ); + &topLevelAS.accelerationStructure ); std::vector accelerationDescriptionSets; for ( size_t i = 0; i < rayTracingDescriptorSets.size(); i++ ) { - accelerationDescriptionSets.push_back( - vk::WriteDescriptorSet( *rayTracingDescriptorSets[i], 0, 0, 1, bindings[0].descriptorType ) ); + accelerationDescriptionSets.emplace_back( rayTracingDescriptorSets[i], 0, 0, 1, bindings[0].descriptorType ); accelerationDescriptionSets.back().pNext = &writeDescriptorSetAcceleration; } - device->updateDescriptorSets( accelerationDescriptionSets, {} ); + device.updateDescriptorSets( accelerationDescriptionSets, nullptr ); // Bind all the other buffers and images, starting with dstBinding == 2 (dstBinding == 1 is used by the backBuffer // view) @@ -1048,13 +1071,13 @@ int main( int /*argc*/, char ** /*argv*/ ) // create the ray-tracing shader modules glslang::InitializeProcess(); - 
vk::UniqueShaderModule raygenShaderModule = + vk::ShaderModule raygenShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eRaygenNV, raygenShaderText ); - vk::UniqueShaderModule missShaderModule = + vk::ShaderModule missShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eMissNV, missShaderText ); - vk::UniqueShaderModule shadowMissShaderModule = + vk::ShaderModule shadowMissShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eMissNV, shadowMissShaderText ); - vk::UniqueShaderModule closestHitShaderModule = + vk::ShaderModule closestHitShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eClosestHitNV, closestHitShaderText ); glslang::FinalizeProcess(); @@ -1063,46 +1086,46 @@ int main( int /*argc*/, char ** /*argv*/ ) std::vector shaderGroups; // We use only one ray generation, that will implement the camera model - shaderStages.push_back( - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eRaygenNV, *raygenShaderModule, "main" ) ); - shaderGroups.push_back( vk::RayTracingShaderGroupCreateInfoNV( - vk::RayTracingShaderGroupTypeNV::eGeneral, 0, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ) ); + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eRaygenNV, raygenShaderModule, "main" ); + shaderGroups.emplace_back( + vk::RayTracingShaderGroupTypeNV::eGeneral, 0, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ); // The first miss shader is used to look-up the environment in case the rays from the camera miss the geometry - shaderStages.push_back( - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eMissNV, *missShaderModule, "main" ) ); - shaderGroups.push_back( vk::RayTracingShaderGroupCreateInfoNV( - vk::RayTracingShaderGroupTypeNV::eGeneral, 1, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ) ); + shaderStages.emplace_back( + 
vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eMissNV, missShaderModule, "main" ); + shaderGroups.emplace_back( + vk::RayTracingShaderGroupTypeNV::eGeneral, 1, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ); // The second miss shader is invoked when a shadow ray misses the geometry. It simply indicates that no occlusion // has been found - shaderStages.push_back( - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eMissNV, *shadowMissShaderModule, "main" ) ); - shaderGroups.push_back( vk::RayTracingShaderGroupCreateInfoNV( - vk::RayTracingShaderGroupTypeNV::eGeneral, 2, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ) ); + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eMissNV, shadowMissShaderModule, "main" ); + shaderGroups.emplace_back( + vk::RayTracingShaderGroupTypeNV::eGeneral, 2, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV, VK_SHADER_UNUSED_NV ); // The first hit group defines the shaders invoked when a ray shot from the camera hit the geometry. In this case we // only specify the closest hit shader, and rely on the build-in triangle intersection and pass-through any-hit // shader. However, explicit intersection and any hit shaders could be added as well. 
- shaderStages.push_back( vk::PipelineShaderStageCreateInfo( - {}, vk::ShaderStageFlagBits::eClosestHitNV, *closestHitShaderModule, "main" ) ); - shaderGroups.push_back( vk::RayTracingShaderGroupCreateInfoNV( vk::RayTracingShaderGroupTypeNV::eTrianglesHitGroup, - VK_SHADER_UNUSED_NV, - 3, - VK_SHADER_UNUSED_NV, - VK_SHADER_UNUSED_NV ) ); + shaderStages.emplace_back( + vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eClosestHitNV, closestHitShaderModule, "main" ); + shaderGroups.emplace_back( vk::RayTracingShaderGroupTypeNV::eTrianglesHitGroup, + VK_SHADER_UNUSED_NV, + 3, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV ); // The second hit group defines the shaders invoked when a shadow ray hits the geometry. For simple shadows we do // not need any shader in that group: we will rely on initializing the payload and update it only in the miss shader - shaderGroups.push_back( vk::RayTracingShaderGroupCreateInfoNV( vk::RayTracingShaderGroupTypeNV::eTrianglesHitGroup, - VK_SHADER_UNUSED_NV, - VK_SHADER_UNUSED_NV, - VK_SHADER_UNUSED_NV, - VK_SHADER_UNUSED_NV ) ); + shaderGroups.emplace_back( vk::RayTracingShaderGroupTypeNV::eTrianglesHitGroup, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV, + VK_SHADER_UNUSED_NV ); // Create the layout of the pipeline following the provided descriptor set layout - vk::UniquePipelineLayout rayTracingPipelineLayout = - device->createPipelineLayoutUnique( vk::PipelineLayoutCreateInfo( {}, *rayTracingDescriptorSetLayout ) ); + vk::PipelineLayout rayTracingPipelineLayout = + device.createPipelineLayout( vk::PipelineLayoutCreateInfo( {}, rayTracingDescriptorSetLayout ) ); // Assemble the shader stages and recursion depth info into the raytracing pipeline // The ray tracing process can shoot rays from the camera, and a shadow ray can be shot from the @@ -1111,13 +1134,13 @@ int main( int /*argc*/, char ** /*argv*/ ) // in the ray generation to avoid deep recursion. 
uint32_t maxRecursionDepth = 2; vk::RayTracingPipelineCreateInfoNV rayTracingPipelineCreateInfo( - {}, shaderStages, shaderGroups, maxRecursionDepth, *rayTracingPipelineLayout ); - vk::UniquePipeline rayTracingPipeline; - vk::ResultValue rvPipeline = - device->createRayTracingPipelineNVUnique( nullptr, rayTracingPipelineCreateInfo ); + {}, shaderStages, shaderGroups, maxRecursionDepth, rayTracingPipelineLayout ); + vk::Pipeline rayTracingPipeline; + vk::ResultValue rvPipeline = + device.createRayTracingPipelineNV( nullptr, rayTracingPipelineCreateInfo ); switch ( rvPipeline.result ) { - case vk::Result::eSuccess: rayTracingPipeline = std::move( rvPipeline.value ); break; + case vk::Result::eSuccess: rayTracingPipeline = rvPipeline.value; break; case vk::Result::ePipelineCompileRequiredEXT: // something meaningfull here break; @@ -1144,12 +1167,12 @@ int main( int /*argc*/, char ** /*argv*/ ) uint32_t shaderBindingTableSize = hitShaderBindingOffset + hitShaderTableSize; std::vector shaderHandleStorage( shaderBindingTableSize ); - (void)device->getRayTracingShaderGroupHandlesNV( - *rayTracingPipeline, 0, 1, raygenShaderTableSize, &shaderHandleStorage[raygenShaderBindingOffset] ); - (void)device->getRayTracingShaderGroupHandlesNV( - *rayTracingPipeline, 1, 2, missShaderTableSize, &shaderHandleStorage[missShaderBindingOffset] ); - (void)device->getRayTracingShaderGroupHandlesNV( - *rayTracingPipeline, 3, 2, hitShaderTableSize, &shaderHandleStorage[hitShaderBindingOffset] ); + (void)device.getRayTracingShaderGroupHandlesNV( + rayTracingPipeline, 0, 1, raygenShaderTableSize, &shaderHandleStorage[raygenShaderBindingOffset] ); + (void)device.getRayTracingShaderGroupHandlesNV( + rayTracingPipeline, 1, 2, missShaderTableSize, &shaderHandleStorage[missShaderBindingOffset] ); + (void)device.getRayTracingShaderGroupHandlesNV( + rayTracingPipeline, 3, 2, hitShaderTableSize, &shaderHandleStorage[hitShaderBindingOffset] ); vk::su::BufferData shaderBindingTableBufferData( 
physicalDevice, device, @@ -1175,7 +1198,7 @@ int main( int /*argc*/, char ** /*argv*/ ) double startTime = glfwGetTime(); glfwPollEvents(); - vk::UniqueCommandBuffer const & commandBuffer = perFrameData[frameIndex].commandBuffer; + vk::CommandBuffer const & commandBuffer = perFrameData[frameIndex].commandBuffer; int w, h; glfwGetWindowSize( window, &w, &h ); @@ -1183,7 +1206,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { windowExtent.width = w; windowExtent.height = h; - device->waitIdle(); + device.waitIdle(); swapChainData = vk::su::SwapChainData( physicalDevice, device, @@ -1196,9 +1219,9 @@ int main( int /*argc*/, char ** /*argv*/ ) depthBufferData = vk::su::DepthBufferData( physicalDevice, device, vk::su::pickDepthFormat( physicalDevice ), windowExtent ); - vk::su::oneTimeSubmit( commandBuffer, graphicsQueue, [&]( vk::UniqueCommandBuffer const & commandBuffer ) { + vk::su::oneTimeSubmit( commandBuffer, graphicsQueue, [&]( vk::CommandBuffer const & commandBuffer ) { vk::su::setImageLayout( commandBuffer, - *depthBufferData.image, + depthBufferData.image, depthFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eDepthStencilAttachmentOptimal ); @@ -1219,53 +1242,50 @@ int main( int /*argc*/, char ** /*argv*/ ) uniformBufferData.upload( device, uniformBufferObject ); // frame begin - vk::ResultValue rv = device->acquireNextImageKHR( - *swapChainData.swapChain, UINT64_MAX, *perFrameData[frameIndex].presentCompleteSemaphore, nullptr ); + vk::ResultValue rv = device.acquireNextImageKHR( + swapChainData.swapChain, UINT64_MAX, perFrameData[frameIndex].presentCompleteSemaphore, nullptr ); assert( rv.result == vk::Result::eSuccess ); uint32_t backBufferIndex = rv.value; while ( vk::Result::eTimeout == - device->waitForFences( *perFrameData[frameIndex].fence, VK_TRUE, vk::su::FenceTimeout ) ) + device.waitForFences( perFrameData[frameIndex].fence, VK_TRUE, vk::su::FenceTimeout ) ) ; - device->resetFences( *perFrameData[frameIndex].fence ); + device.resetFences( 
perFrameData[frameIndex].fence ); - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); if ( appInfo.useRasterRender ) { - commandBuffer->beginRenderPass( vk::RenderPassBeginInfo( *renderPass, - *framebuffers[backBufferIndex], - vk::Rect2D( vk::Offset2D( 0, 0 ), windowExtent ), - clearValues ), - vk::SubpassContents::eInline ); + commandBuffer.beginRenderPass( + vk::RenderPassBeginInfo( + renderPass, framebuffers[backBufferIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), windowExtent ), clearValues ), + vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, *descriptorSet, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( windowExtent.width ), - static_cast( windowExtent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), windowExtent ) ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( windowExtent.width ), + static_cast( windowExtent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), windowExtent ) ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->bindIndexBuffer( *indexBufferData.buffer, 0, vk::IndexType::eUint32 ); - commandBuffer->drawIndexed( vk::su::checked_cast( indices.size() ), 1, 0, 0, 0 ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.bindIndexBuffer( indexBufferData.buffer, 0, vk::IndexType::eUint32 ); + 
commandBuffer.drawIndexed( vk::su::checked_cast( indices.size() ), 1, 0, 0, 0 ); - commandBuffer->endRenderPass(); + commandBuffer.endRenderPass(); } else { vk::DescriptorImageInfo imageInfo( - nullptr, *swapChainData.imageViews[backBufferIndex], vk::ImageLayout::eGeneral ); - device->updateDescriptorSets( - vk::WriteDescriptorSet( - *rayTracingDescriptorSets[backBufferIndex], 1, 0, bindings[1].descriptorType, imageInfo ), - {} ); + nullptr, swapChainData.imageViews[backBufferIndex], vk::ImageLayout::eGeneral ); + vk::WriteDescriptorSet writeDescriptorSet( + rayTracingDescriptorSets[backBufferIndex], 1, 0, bindings[1].descriptorType, imageInfo ); + device.updateDescriptorSets( writeDescriptorSet, nullptr ); vk::su::setImageLayout( commandBuffer, swapChainData.images[backBufferIndex], @@ -1273,28 +1293,28 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::ImageLayout::eUndefined, vk::ImageLayout::eGeneral ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eRayTracingNV, *rayTracingPipeline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eRayTracingNV, rayTracingPipeline ); - commandBuffer->bindDescriptorSets( vk::PipelineBindPoint::eRayTracingNV, - *rayTracingPipelineLayout, - 0, - *rayTracingDescriptorSets[backBufferIndex], - nullptr ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eRayTracingNV, + rayTracingPipelineLayout, + 0, + rayTracingDescriptorSets[backBufferIndex], + nullptr ); - commandBuffer->traceRaysNV( *shaderBindingTableBufferData.buffer, - raygenShaderBindingOffset, - *shaderBindingTableBufferData.buffer, - missShaderBindingOffset, - missShaderBindingStride, - *shaderBindingTableBufferData.buffer, - hitShaderBindingOffset, - hitShaderBindingStride, - nullptr, - 0, - 0, - windowExtent.width, - windowExtent.height, - 1 ); + commandBuffer.traceRaysNV( shaderBindingTableBufferData.buffer, + raygenShaderBindingOffset, + shaderBindingTableBufferData.buffer, + missShaderBindingOffset, + missShaderBindingStride, + 
shaderBindingTableBufferData.buffer, + hitShaderBindingOffset, + hitShaderBindingStride, + nullptr, + 0, + 0, + windowExtent.width, + windowExtent.height, + 1 ); vk::su::setImageLayout( commandBuffer, swapChainData.images[backBufferIndex], @@ -1304,18 +1324,18 @@ int main( int /*argc*/, char ** /*argv*/ ) } // frame end - commandBuffer->end(); + commandBuffer.end(); const vk::PipelineStageFlags waitDstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput; graphicsQueue.submit( vk::SubmitInfo( 1, - &( *perFrameData[frameIndex].presentCompleteSemaphore ), + &( perFrameData[frameIndex].presentCompleteSemaphore ), &waitDstStageMask, 1, - &( *commandBuffer ), + &commandBuffer, 1, - &( *perFrameData[frameIndex].renderCompleteSemaphore ) ), - *perFrameData[frameIndex].fence ); + &( perFrameData[frameIndex].renderCompleteSemaphore ) ), + perFrameData[frameIndex].fence ); vk::Result result = presentQueue.presentKHR( vk::PresentInfoKHR( - *perFrameData[frameIndex].renderCompleteSemaphore, *swapChainData.swapChain, backBufferIndex ) ); + perFrameData[frameIndex].renderCompleteSemaphore, swapChainData.swapChain, backBufferIndex ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -1345,9 +1365,51 @@ int main( int /*argc*/, char ** /*argv*/ ) } // Cleanup - device->waitIdle(); - swapChainData.swapChain.reset(); // need to reset swapChain before destroying the surface ! 
- VULKAN_HPP_DEFAULT_DISPATCHER.vkDestroySurfaceKHR( VkInstance( *instance ), VkSurfaceKHR( surface ), nullptr ); + device.waitIdle(); + + shaderBindingTableBufferData.clear( device ); + device.destroyPipeline( rayTracingPipeline ); + device.destroyPipelineLayout( rayTracingPipelineLayout ); + device.destroyShaderModule( closestHitShaderModule ); + device.destroyShaderModule( shadowMissShaderModule ); + device.destroyShaderModule( missShaderModule ); + device.destroyShaderModule( raygenShaderModule ); + device.freeDescriptorSets( rayTracingDescriptorPool, rayTracingDescriptorSets ); + device.destroyDescriptorSetLayout( rayTracingDescriptorSetLayout ); + device.destroyDescriptorPool( rayTracingDescriptorPool ); + topLevelAS.clear( device ); + bottomLevelAS.clear( device ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + uniformBufferData.clear( device ); + device.destroyPipeline( graphicsPipeline ); + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + indexBufferData.clear( device ); + vertexBufferData.clear( device ); + materialBufferData.clear( device ); + for ( auto & texture : textures ) + { + texture.clear( device ); + } + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + depthBufferData.clear( device ); + device.destroyRenderPass( renderPass ); + swapChainData.clear( device ); + device.destroyDescriptorPool( descriptorPool ); + for ( int i = 0; i < IMGUI_VK_QUEUED_FRAMES; i++ ) + { + perFrameData[i].clear( device ); + } + device.destroy(); + instance.destroySurfaceKHR( surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); + glfwDestroyWindow( window ); glfwTerminate(); } diff --git a/samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp 
b/samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp index a0fa515..b8cda71 100644 --- a/samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp +++ b/samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp @@ -42,36 +42,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, 
vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -79,31 +79,31 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + 
vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format, vk::AttachmentLoadOp::eClear, vk::ImageLayout::eColorAttachmentOptimal ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -113,13 +113,13 @@ int main( int /*argc*/, char ** /*argv*/ ) texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -127,7 +127,7 @@ int main( int /*argc*/, char ** /*argv*/ ) pipelineLayout, renderPass ); - commandBuffer->begin( 
vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); vk::su::TextureData greenTextureData( physicalDevice, device ); greenTextureData.setImage( device, commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); @@ -136,12 +136,12 @@ int main( int /*argc*/, char ** /*argv*/ ) checkeredTextureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() ); // create two identical descriptor sets, each with a different texture but identical UBOs - vk::UniqueDescriptorPool descriptorPool = vk::su::createDescriptorPool( + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 2 }, { vk::DescriptorType::eCombinedImageSampler, 2 } } ); - std::array layouts = { descriptorSetLayout.get(), descriptorSetLayout.get() }; - std::vector descriptorSets = - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( descriptorPool.get(), layouts ) ); + std::array layouts = { descriptorSetLayout, descriptorSetLayout }; + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, layouts ); + std::vector descriptorSets = device.allocateDescriptorSets( descriptorSetAllocateInfo ); assert( descriptorSets.size() == 2 ); vk::su::updateDescriptorSets( device, @@ -156,13 +156,13 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ // create four secondary command buffers, for each quadrant of the screen - std::vector secondaryCommandBuffers = device->allocateCommandBuffersUnique( - vk::CommandBufferAllocateInfo( commandPool.get(), vk::CommandBufferLevel::eSecondary, 4 ) ); + std::vector secondaryCommandBuffers = device.allocateCommandBuffers( + vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::eSecondary, 4 ) ); // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = 
device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); @@ -177,9 +177,8 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::Rect2D scissor( vk::Offset2D( 0, 0 ), vk::Extent2D( surfaceData.extent ) ); // now we record four separate command buffers, one for each quadrant of the screen - vk::CommandBufferInheritanceInfo commandBufferInheritanceInfo( - renderPass.get(), 0, framebuffers[currentBuffer.value].get() ); - vk::CommandBufferBeginInfo secondaryBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit | + vk::CommandBufferInheritanceInfo commandBufferInheritanceInfo( renderPass, 0, framebuffers[currentBuffer.value] ); + vk::CommandBufferBeginInfo secondaryBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit | vk::CommandBufferUsageFlagBits::eRenderPassContinue, &commandBufferInheritanceInfo ); @@ -188,30 +187,30 @@ int main( int /*argc*/, char ** /*argv*/ ) viewport.x = 25.0f + 250.0f * ( i % 2 ); viewport.y = 25.0f + 250.0f * ( i / 2 ); - secondaryCommandBuffers[i]->begin( secondaryBeginInfo ); - secondaryCommandBuffers[i]->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - secondaryCommandBuffers[i]->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSets[i == 0 || i == 3].get(), nullptr ); - secondaryCommandBuffers[i]->bindVertexBuffers( 0, vertexBufferData.buffer.get(), offset ); - secondaryCommandBuffers[i]->setViewport( 0, viewport ); - secondaryCommandBuffers[i]->setScissor( 0, scissor ); - secondaryCommandBuffers[i]->draw( 12 * 3, 1, 0, 0 ); - 
secondaryCommandBuffers[i]->end(); + secondaryCommandBuffers[i].begin( secondaryBeginInfo ); + secondaryCommandBuffers[i].bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + secondaryCommandBuffers[i].bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSets[i == 0 || i == 3], nullptr ); + secondaryCommandBuffers[i].bindVertexBuffers( 0, vertexBufferData.buffer, offset ); + secondaryCommandBuffers[i].setViewport( 0, viewport ); + secondaryCommandBuffers[i].setScissor( 0, scissor ); + secondaryCommandBuffers[i].draw( 12 * 3, 1, 0, 0 ); + secondaryCommandBuffers[i].end(); } std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); // specifying VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS means this render pass may ONLY call // vkCmdExecuteCommands - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eSecondaryCommandBuffers ); - commandBuffer->executeCommands( vk::uniqueToRaw( secondaryCommandBuffers ) ); - commandBuffer->endRenderPass(); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eSecondaryCommandBuffers ); + commandBuffer.executeCommands( secondaryCommandBuffers ); + commandBuffer.endRenderPass(); vk::ImageMemoryBarrier prePresentBarrier( vk::AccessFlagBits::eColorAttachmentWrite, @@ -222,25 +221,25 @@ int main( int /*argc*/, char ** /*argv*/ ) VK_QUEUE_FAMILY_IGNORED, swapChainData.images[currentBuffer.value], vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) ); - commandBuffer->pipelineBarrier( 
vk::PipelineStageFlagBits::eColorAttachmentOutput, - vk::PipelineStageFlagBits::eBottomOfPipe, - vk::DependencyFlags(), - nullptr, - nullptr, - prePresentBarrier ); - commandBuffer->end(); + commandBuffer.pipelineBarrier( vk::PipelineStageFlagBits::eColorAttachmentOutput, + vk::PipelineStageFlagBits::eBottomOfPipe, + vk::DependencyFlags(), + nullptr, + nullptr, + prePresentBarrier ); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -253,7 +252,35 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_END */ - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSets ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer 
); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + checkeredTextureData.clear( device ); + greenTextureData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/SeparateImageSampler/SeparateImageSampler.cpp b/samples/SeparateImageSampler/SeparateImageSampler.cpp index 9892b39..e0cbbe1 100644 --- a/samples/SeparateImageSampler/SeparateImageSampler.cpp +++ b/samples/SeparateImageSampler/SeparateImageSampler.cpp @@ -70,36 +70,36 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice 
device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -107,23 +107,23 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, 
uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format, vk::AttachmentLoadOp::eClear ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderTextTS_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -135,29 +135,28 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); // Create the separate image vk::su::TextureData textureData( physicalDevice, device ); textureData.setImage( device, commandBuffer, vk::su::MonochromeImageGenerator( { 118, 185, 0 } ) ); // Create the separate sampler - vk::UniqueSampler sampler = - device->createSamplerUnique( vk::SamplerCreateInfo( vk::SamplerCreateFlags(), - vk::Filter::eNearest, - vk::Filter::eNearest, - vk::SamplerMipmapMode::eNearest, - vk::SamplerAddressMode::eClampToEdge, - vk::SamplerAddressMode::eClampToEdge, - vk::SamplerAddressMode::eClampToEdge, - 0.0f, - false, - 1.0f, - false, - vk::CompareOp::eNever, - 0.0f, - 0.0f, - vk::BorderColor::eFloatOpaqueWhite ) ); + vk::Sampler sampler = device.createSampler( 
vk::SamplerCreateInfo( vk::SamplerCreateFlags(), + vk::Filter::eNearest, + vk::Filter::eNearest, + vk::SamplerMipmapMode::eNearest, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + vk::SamplerAddressMode::eClampToEdge, + 0.0f, + false, + 1.0f, + false, + vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueWhite ) ); // Create binding and layout for the following, matching contents of shader // binding 0 = uniform buffer (MVP) @@ -168,12 +167,12 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DescriptorSetLayoutBinding( 1, vk::DescriptorType::eSampledImage, 1, vk::ShaderStageFlagBits::eFragment ), vk::DescriptorSetLayoutBinding( 2, vk::DescriptorType::eSampler, 1, vk::ShaderStageFlagBits::eFragment ) } }; - vk::UniqueDescriptorSetLayout descriptorSetLayout = device->createDescriptorSetLayoutUnique( + vk::DescriptorSetLayout descriptorSetLayout = device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( vk::DescriptorSetLayoutCreateFlags(), resourceBindings ) ); // Create pipeline layout - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); // Create a single pool to contain data for the descriptor set std::array poolSizes = { @@ -181,35 +180,33 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DescriptorPoolSize( vk::DescriptorType::eSampledImage, 1 ), vk::DescriptorPoolSize( vk::DescriptorType::eSampler, 1 ) } }; - vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( + vk::DescriptorPool descriptorPool = device.createDescriptorPool( vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSizes ) ); // Populate descriptor sets - vk::UniqueDescriptorSet descriptorSet = 
std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); - vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer.get(), 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo imageInfo( textureData.textureSampler.get(), - textureData.imageData->imageView.get(), - vk::ImageLayout::eShaderReadOnlyOptimal ); - vk::DescriptorImageInfo samplerInfo( sampler.get(), {}, {} ); + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( + textureData.sampler, textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorImageInfo samplerInfo( sampler, {}, {} ); std::array descriptorWrites = { - { vk::WriteDescriptorSet( *descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), - vk::WriteDescriptorSet( *descriptorSet, 1, 0, vk::DescriptorType::eSampledImage, imageInfo ), - vk::WriteDescriptorSet( *descriptorSet, 2, 0, vk::DescriptorType::eSampler, samplerInfo ) } + { vk::WriteDescriptorSet( descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( descriptorSet, 1, 0, vk::DescriptorType::eSampledImage, imageInfo ), + vk::WriteDescriptorSet( descriptorSet, 2, 0, vk::DescriptorType::eSampler, samplerInfo ) } }; - device->updateDescriptorSets( descriptorWrites, nullptr ); + device.updateDescriptorSets( descriptorWrites, nullptr ); /* VULKAN_KEY_END */ - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::Pipeline graphicsPipeline = 
vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -218,9 +215,9 @@ int main( int /*argc*/, char ** /*argv*/ ) renderPass ); // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); @@ -228,41 +225,40 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, 
pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) 
); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -273,7 +269,35 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + textureData.clear( device ); + device.destroySampler( sampler ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/SurfaceCapabilities/SurfaceCapabilities.cpp b/samples/SurfaceCapabilities/SurfaceCapabilities.cpp index 9893acb..a3320fa 100644 --- a/samples/SurfaceCapabilities/SurfaceCapabilities.cpp +++ b/samples/SurfaceCapabilities/SurfaceCapabilities.cpp @@ -67,12 +67,11 @@ int main( int /*argc*/, char ** /*argv*/ ) #endif std::vector instanceExtensionProperties = vk::enumerateInstanceExtensionProperties(); - bool supportsGetSurfaceCapabilities2 = - ( std::find_if( 
instanceExtensionProperties.begin(), - instanceExtensionProperties.end(), - []( vk::ExtensionProperties const & ep ) { - return strcmp( ep.extensionName, VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME ) == 0; - } ) != instanceExtensionProperties.end() ); + auto propertyIterator = std::find_if( + instanceExtensionProperties.begin(), instanceExtensionProperties.end(), []( vk::ExtensionProperties const & ep ) { + return strcmp( ep.extensionName, VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME ) == 0; + } ); + bool supportsGetSurfaceCapabilities2 = ( propertyIterator != instanceExtensionProperties.end() ); std::vector extensions = vk::su::getInstanceExtensions(); if ( supportsGetSurfaceCapabilities2 ) @@ -80,13 +79,14 @@ int main( int /*argc*/, char ** /*argv*/ ) extensions.push_back( VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME ); } - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, extensions ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, extensions ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = instance.enumeratePhysicalDevices(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); @@ -108,7 +108,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DisplayNativeHdrSurfaceCapabilitiesAMD, vk::SharedPresentSurfaceCapabilitiesKHR, vk::SurfaceCapabilitiesFullScreenExclusiveEXT, - vk::SurfaceProtectedCapabilitiesKHR>( *surfaceData.surface ); + vk::SurfaceProtectedCapabilitiesKHR>( surfaceData.surface ); vk::SurfaceCapabilitiesKHR const & surfaceCapabilities = surfaceCapabilities2.get().surfaceCapabilities; @@ -159,12 
+159,16 @@ int main( int /*argc*/, char ** /*argv*/ ) else { vk::SurfaceCapabilitiesKHR surfaceCapabilities = - physicalDevices[i].getSurfaceCapabilitiesKHR( *surfaceData.surface ); + physicalDevices[i].getSurfaceCapabilitiesKHR( surfaceData.surface ); cout( surfaceCapabilities ); } } /* VULKAN_KEY_END */ + + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/SurfaceFormats/SurfaceFormats.cpp b/samples/SurfaceFormats/SurfaceFormats.cpp index cc9112b..10d63c4 100644 --- a/samples/SurfaceFormats/SurfaceFormats.cpp +++ b/samples/SurfaceFormats/SurfaceFormats.cpp @@ -29,13 +29,14 @@ int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif // enumerate the physicalDevices - std::vector physicalDevices = instance->enumeratePhysicalDevices(); + std::vector physicalDevices = instance.enumeratePhysicalDevices(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); @@ -46,7 +47,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { std::cout << "PhysicalDevice " << i << "\n"; std::vector surfaceFormats = - physicalDevices[i].getSurfaceFormatsKHR( *surfaceData.surface ); + physicalDevices[i].getSurfaceFormatsKHR( surfaceData.surface ); for ( size_t j = 0; j < surfaceFormats.size(); j++ ) { std::cout << "\tFormat " << j << "\n"; @@ -59,6 +60,10 @@ int main( int /*argc*/, char ** /*argv*/ ) } /* VULKAN_KEY_END */ + + 
instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/Template/Template.cpp b/samples/Template/Template.cpp index f6c804d..eff16ac 100644 --- a/samples/Template/Template.cpp +++ b/samples/Template/Template.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// VulkanHpp Samples : Template -// Template sample to start from. Draw textured cube with mostly helpers. +// VulkanHpp Samples : DrawTexturedCube +// Draw a textured cube #include "../utils/geometries.hpp" #include "../utils/math.hpp" @@ -25,43 +25,43 @@ #include #include -static char const * AppName = "Template"; +static char const * AppName = "DrawTexturedCube"; static char const * EngineName = "Vulkan.hpp"; int main( int /*argc*/, char ** /*argv*/ ) { try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( 
physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -69,34 +69,34 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::TextureData textureData( physicalDevice, device ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); textureData.setImage( device, commandBuffer, vk::su::CheckerboardImageGenerator() ); vk::su::BufferData uniformBufferData( physicalDevice, device, sizeof( glm::mat4x4 ), vk::BufferUsageFlagBits::eUniformBuffer ); - vk::su::copyToDevice( - device, uniformBufferData.deviceMemory, vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ) ); + glm::mat4x4 mvpcMatrix = 
vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); + vk::su::copyToDevice( device, uniformBufferData.deviceMemory, mvpcMatrix ); - vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex }, { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, depthBufferData.format ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PT_T ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_T_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( + std::vector framebuffers = vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, depthBufferData.imageView, surfaceData.extent ); vk::su::BufferData vertexBufferData( @@ -106,20 +106,20 @@ int main( int /*argc*/, char ** /*argv*/ ) texturedCubeData, sizeof( texturedCubeData ) / sizeof( texturedCubeData[0] ) ); - 
vk::UniqueDescriptorPool descriptorPool = vk::su::createDescriptorPool( + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); + vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, {} } }, textureData ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), sizeof( texturedCubeData[0] ), { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, @@ -128,50 +128,48 @@ int main( int /*argc*/, char ** /*argv*/ ) renderPass ); // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + 
device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.bindVertexBuffers( 0, vertexBufferData.buffer, { 0 } ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->bindVertexBuffers( 0, *vertexBufferData.buffer, { 0 } ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.draw( 12 * 3, 1, 0, 0 ); + 
commandBuffer.endRenderPass(); + commandBuffer.end(); - commandBuffer->draw( 12 * 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); - - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -182,7 +180,36 @@ int main( int /*argc*/, char ** /*argv*/ ) } std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); - device->waitIdle(); + /* VULKAN_KEY_END */ + + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + vertexBufferData.clear( device ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + 
device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + uniformBufferData.clear( device ); + textureData.clear( device ); + depthBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/TexelBuffer/TexelBuffer.cpp b/samples/TexelBuffer/TexelBuffer.cpp index 70bff5a..3683214 100644 --- a/samples/TexelBuffer/TexelBuffer.cpp +++ b/samples/TexelBuffer/TexelBuffer.cpp @@ -59,12 +59,13 @@ int main( int /*argc*/, char ** /*argv*/ ) try { - vk::UniqueInstance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); + vk::Instance instance = vk::su::createInstance( AppName, EngineName, {}, vk::su::getInstanceExtensions() ); #if !defined( NDEBUG ) - vk::UniqueDebugUtilsMessengerEXT debugUtilsMessenger = vk::su::createDebugUtilsMessenger( instance ); + vk::DebugUtilsMessengerEXT debugUtilsMessenger = + instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); #endif - vk::PhysicalDevice physicalDevice = instance->enumeratePhysicalDevices().front(); + vk::PhysicalDevice physicalDevice = instance.enumeratePhysicalDevices().front(); vk::PhysicalDeviceProperties physicalDeviceProperties = physicalDevice.getProperties(); if ( physicalDeviceProperties.limits.maxTexelBufferElements < 4 ) @@ -84,26 +85,25 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 500, 500 ) ); std::pair graphicsAndPresentQueueFamilyIndex = - vk::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, *surfaceData.surface ); - vk::UniqueDevice device = + vk::su::findGraphicsAndPresentQueueFamilyIndex( 
physicalDevice, surfaceData.surface ); + vk::Device device = vk::su::createDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::UniqueCommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); - vk::UniqueCommandBuffer commandBuffer = std::move( device - ->allocateCommandBuffersUnique( vk::CommandBufferAllocateInfo( - commandPool.get(), vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandPool commandPool = vk::su::createCommandPool( device, graphicsAndPresentQueueFamilyIndex.first ); + vk::CommandBuffer commandBuffer = + device.allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); - vk::Queue graphicsQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); - vk::Queue presentQueue = device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); + vk::Queue graphicsQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ); + vk::Queue presentQueue = device.getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ); vk::su::SwapChainData swapChainData( physicalDevice, device, - *surfaceData.surface, + surfaceData.surface, surfaceData.extent, vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - vk::UniqueSwapchainKHR(), + {}, graphicsAndPresentQueueFamilyIndex.first, graphicsAndPresentQueueFamilyIndex.second ); @@ -111,100 +111,97 @@ int main( int /*argc*/, char ** /*argv*/ ) physicalDevice, device, sizeof( texels ), vk::BufferUsageFlagBits::eUniformTexelBuffer ); texelBufferData.upload( device, texels ); - vk::UniqueBufferView texelBufferView = device->createBufferViewUnique( - vk::BufferViewCreateInfo( {}, *texelBufferData.buffer, texelFormat, 0, sizeof( texels ) ) ); + vk::BufferView texelBufferView = device.createBufferView( + vk::BufferViewCreateInfo( {}, texelBufferData.buffer, texelFormat, 0, sizeof( texels ) ) ); - 
vk::UniqueDescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( + vk::DescriptorSetLayout descriptorSetLayout = vk::su::createDescriptorSetLayout( device, { { vk::DescriptorType::eUniformTexelBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); - vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( - vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); + vk::PipelineLayout pipelineLayout = device.createPipelineLayout( + vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), descriptorSetLayout ) ); - vk::UniqueRenderPass renderPass = vk::su::createRenderPass( + vk::RenderPass renderPass = vk::su::createRenderPass( device, - vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface.get() ) ).format, + vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format, vk::Format::eUndefined ); glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = + vk::ShaderModule vertexShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText ); - vk::UniqueShaderModule fragmentShaderModule = + vk::ShaderModule fragmentShaderModule = vk::su::createShaderModule( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); glslang::FinalizeProcess(); - std::vector framebuffers = vk::su::createFramebuffers( - device, renderPass, swapChainData.imageViews, vk::UniqueImageView(), surfaceData.extent ); + std::vector framebuffers = + vk::su::createFramebuffers( device, renderPass, swapChainData.imageViews, vk::ImageView(), surfaceData.extent ); - vk::UniqueDescriptorPool descriptorPool = + vk::DescriptorPool descriptorPool = vk::su::createDescriptorPool( device, { { vk::DescriptorType::eUniformTexelBuffer, 1 } } ); - vk::UniqueDescriptorSet descriptorSet = std::move( - device->allocateDescriptorSetsUnique( vk::DescriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout 
) ) - .front() ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayout ); + vk::DescriptorSet descriptorSet = device.allocateDescriptorSets( descriptorSetAllocateInfo ).front(); vk::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformTexelBuffer, texelBufferData.buffer, texelBufferView } }, {} ); - vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); - vk::UniquePipeline graphicsPipeline = - vk::su::createGraphicsPipeline( device, - pipelineCache, - std::make_pair( *vertexShaderModule, nullptr ), - std::make_pair( *fragmentShaderModule, nullptr ), - 0, - {}, - vk::FrontFace::eClockwise, - false, - pipelineLayout, - renderPass ); + vk::PipelineCache pipelineCache = device.createPipelineCache( vk::PipelineCacheCreateInfo() ); + vk::Pipeline graphicsPipeline = vk::su::createGraphicsPipeline( device, + pipelineCache, + std::make_pair( vertexShaderModule, nullptr ), + std::make_pair( fragmentShaderModule, nullptr ), + 0, + {}, + vk::FrontFace::eClockwise, + false, + pipelineLayout, + renderPass ); /* VULKAN_KEY_START */ // Get the index of the next available swapchain image: - vk::UniqueSemaphore imageAcquiredSemaphore = device->createSemaphoreUnique( vk::SemaphoreCreateInfo() ); - vk::ResultValue currentBuffer = device->acquireNextImageKHR( - swapChainData.swapChain.get(), vk::su::FenceTimeout, imageAcquiredSemaphore.get(), nullptr ); + vk::Semaphore imageAcquiredSemaphore = device.createSemaphore( vk::SemaphoreCreateInfo() ); + vk::ResultValue currentBuffer = + device.acquireNextImageKHR( swapChainData.swapChain, vk::su::FenceTimeout, imageAcquiredSemaphore, nullptr ); assert( currentBuffer.result == vk::Result::eSuccess ); assert( currentBuffer.value < framebuffers.size() ); - commandBuffer->begin( vk::CommandBufferBeginInfo() ); + commandBuffer.begin( vk::CommandBufferBeginInfo() ); vk::ClearValue clearValue; clearValue.color = 
vk::ClearColorValue( std::array( { { 0.2f, 0.2f, 0.2f, 0.2f } } ) ); - vk::RenderPassBeginInfo renderPassBeginInfo( renderPass.get(), - framebuffers[currentBuffer.value].get(), + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, + framebuffers[currentBuffer.value], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); - commandBuffer->beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer->bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline.get() ); - commandBuffer->bindDescriptorSets( - vk::PipelineBindPoint::eGraphics, pipelineLayout.get(), 0, descriptorSet.get(), nullptr ); + commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr ); - commandBuffer->setViewport( 0, - vk::Viewport( 0.0f, - 0.0f, - static_cast( surfaceData.extent.width ), - static_cast( surfaceData.extent.height ), - 0.0f, - 1.0f ) ); - commandBuffer->setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); + commandBuffer.setViewport( 0, + vk::Viewport( 0.0f, + 0.0f, + static_cast( surfaceData.extent.width ), + static_cast( surfaceData.extent.height ), + 0.0f, + 1.0f ) ); + commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer->draw( 3, 1, 0, 0 ); - commandBuffer->endRenderPass(); - commandBuffer->end(); + commandBuffer.draw( 3, 1, 0, 0 ); + commandBuffer.endRenderPass(); + commandBuffer.end(); - vk::UniqueFence drawFence = device->createFenceUnique( vk::FenceCreateInfo() ); + vk::Fence drawFence = device.createFence( vk::FenceCreateInfo() ); vk::PipelineStageFlags waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); - vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); - 
graphicsQueue.submit( submitInfo, drawFence.get() ); + vk::SubmitInfo submitInfo( imageAcquiredSemaphore, waitDestinationStageMask, commandBuffer ); + graphicsQueue.submit( submitInfo, drawFence ); - while ( vk::Result::eTimeout == device->waitForFences( drawFence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( drawFence, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::Result result = - presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, currentBuffer.value ) ); + presentQueue.presentKHR( vk::PresentInfoKHR( {}, swapChainData.swapChain, currentBuffer.value ) ); switch ( result ) { case vk::Result::eSuccess: break; @@ -217,7 +214,32 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_END */ - device->waitIdle(); + device.waitIdle(); + + device.destroyFence( drawFence ); + device.destroySemaphore( imageAcquiredSemaphore ); + device.destroyPipeline( graphicsPipeline ); + device.destroyPipelineCache( pipelineCache ); + device.freeDescriptorSets( descriptorPool, descriptorSet ); + device.destroyDescriptorPool( descriptorPool ); + for ( auto framebuffer : framebuffers ) + { + device.destroyFramebuffer( framebuffer ); + } + device.destroyShaderModule( fragmentShaderModule ); + device.destroyShaderModule( vertexShaderModule ); + device.destroyRenderPass( renderPass ); + device.destroyPipelineLayout( pipelineLayout ); + device.destroyDescriptorSetLayout( descriptorSetLayout ); + device.destroyBufferView( texelBufferView ); + texelBufferData.clear( device ); + swapChainData.clear( device ); + device.freeCommandBuffers( commandPool, commandBuffer ); + device.destroyCommandPool( commandPool ); + device.destroy(); + instance.destroySurfaceKHR( surfaceData.surface ); + instance.destroyDebugUtilsMessengerEXT( debugUtilsMessenger ); + instance.destroy(); } catch ( vk::SystemError & err ) { diff --git a/samples/utils/math.cpp b/samples/utils/math.cpp index 4547125..b035cc4 100644 --- a/samples/utils/math.cpp 
+++ b/samples/utils/math.cpp @@ -40,22 +40,12 @@ namespace vk glm::mat4x4 view = glm::lookAt( glm::vec3( -5.0f, 3.0f, -10.0f ), glm::vec3( 0.0f, 0.0f, 0.0f ), glm::vec3( 0.0f, -1.0f, 0.0f ) ); glm::mat4x4 projection = glm::perspective( fov, 1.0f, 0.1f, 100.0f ); - glm::mat4x4 clip = glm::mat4x4( 1.0f, - 0.0f, - 0.0f, - 0.0f, - 0.0f, - -1.0f, - 0.0f, - 0.0f, - 0.0f, - 0.0f, - 0.5f, - 0.0f, - 0.0f, - 0.0f, - 0.5f, - 1.0f ); // vulkan clip space has inverted y and half z ! + // clang-format off + glm::mat4x4 clip = glm::mat4x4( 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, -1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.5f, 0.0f, + 0.0f, 0.0f, 0.5f, 1.0f ); // vulkan clip space has inverted y and half z ! + // clang-format on return clip * projection * view * model; } } // namespace su diff --git a/samples/utils/shaders.cpp b/samples/utils/shaders.cpp index 7646a75..05b988f 100644 --- a/samples/utils/shaders.cpp +++ b/samples/utils/shaders.cpp @@ -86,9 +86,9 @@ namespace vk return true; } - vk::UniqueShaderModule createShaderModule( vk::UniqueDevice & device, - vk::ShaderStageFlagBits shaderStage, - std::string const & shaderText ) + vk::ShaderModule createShaderModule( vk::Device const & device, + vk::ShaderStageFlagBits shaderStage, + std::string const & shaderText ) { std::vector shaderSPV; if ( !GLSLtoSPV( shaderStage, shaderText, shaderSPV ) ) @@ -96,7 +96,7 @@ namespace vk throw std::runtime_error( "Could not convert glsl shader to spir-v -> terminating" ); } - return device->createShaderModuleUnique( vk::ShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), shaderSPV ) ); + return device.createShaderModule( vk::ShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), shaderSPV ) ); } } // namespace su } // namespace vk diff --git a/samples/utils/shaders.hpp b/samples/utils/shaders.hpp index 5c9f2cc..de7f04c 100644 --- a/samples/utils/shaders.hpp +++ b/samples/utils/shaders.hpp @@ -22,9 +22,9 @@ namespace vk { namespace su { - vk::UniqueShaderModule createShaderModule( vk::UniqueDevice & 
device, - vk::ShaderStageFlagBits shaderStage, - std::string const & shaderText ); + vk::ShaderModule createShaderModule( vk::Device const & device, + vk::ShaderStageFlagBits shaderStage, + std::string const & shaderText ); bool GLSLtoSPV( const vk::ShaderStageFlagBits shaderType, std::string const & glslShader, diff --git a/samples/utils/utils.cpp b/samples/utils/utils.cpp index 8e3cc8e..b419b03 100644 --- a/samples/utils/utils.cpp +++ b/samples/utils/utils.cpp @@ -38,7 +38,7 @@ namespace vk { namespace su { - vk::UniqueDeviceMemory allocateMemory( vk::UniqueDevice const & device, + vk::DeviceMemory allocateDeviceMemory( vk::Device const & device, vk::PhysicalDeviceMemoryProperties const & memoryProperties, vk::MemoryRequirements const & memoryRequirements, vk::MemoryPropertyFlags memoryPropertyFlags ) @@ -46,38 +46,32 @@ namespace vk uint32_t memoryTypeIndex = findMemoryType( memoryProperties, memoryRequirements.memoryTypeBits, memoryPropertyFlags ); - return device->allocateMemoryUnique( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); + return device.allocateMemory( vk::MemoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ) ); } bool contains( std::vector const & extensionProperties, std::string const & extensionName ) { - return std::find_if( extensionProperties.begin(), - extensionProperties.end(), - [&extensionName]( vk::ExtensionProperties const & ep ) { - return extensionName == ep.extensionName; - } ) != extensionProperties.end(); + auto propertyIterator = std::find_if( + extensionProperties.begin(), extensionProperties.end(), [&extensionName]( vk::ExtensionProperties const & ep ) { + return extensionName == ep.extensionName; + } ); + return ( propertyIterator != extensionProperties.end() ); } - vk::UniqueCommandPool createCommandPool( vk::UniqueDevice & device, uint32_t queueFamilyIndex ) + vk::CommandPool createCommandPool( vk::Device const & device, uint32_t queueFamilyIndex ) { vk::CommandPoolCreateInfo 
commandPoolCreateInfo( vk::CommandPoolCreateFlagBits::eResetCommandBuffer, queueFamilyIndex ); - return device->createCommandPoolUnique( commandPoolCreateInfo ); + return device.createCommandPool( commandPoolCreateInfo ); } - vk::UniqueDebugUtilsMessengerEXT createDebugUtilsMessenger( vk::UniqueInstance & instance ) + vk::DebugUtilsMessengerEXT createDebugUtilsMessengerEXT( vk::Instance const & instance ) { - vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | - vk::DebugUtilsMessageSeverityFlagBitsEXT::eError ); - vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | - vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | - vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); - return instance->createDebugUtilsMessengerEXTUnique( vk::DebugUtilsMessengerCreateInfoEXT( - {}, severityFlags, messageTypeFlags, &vk::su::debugUtilsMessengerCallback ) ); + return instance.createDebugUtilsMessengerEXT( vk::su::makeDebugUtilsMessengerCreateInfoEXT() ); } - vk::UniqueDescriptorPool createDescriptorPool( vk::UniqueDevice & device, - std::vector const & poolSizes ) + vk::DescriptorPool createDescriptorPool( vk::Device const & device, + std::vector const & poolSizes ) { assert( !poolSizes.empty() ); uint32_t maxSets = @@ -88,11 +82,11 @@ namespace vk vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, maxSets, poolSizes ); - return device->createDescriptorPoolUnique( descriptorPoolCreateInfo ); + return device.createDescriptorPool( descriptorPoolCreateInfo ); } - vk::UniqueDescriptorSetLayout createDescriptorSetLayout( - vk::UniqueDevice const & device, + vk::DescriptorSetLayout createDescriptorSetLayout( + vk::Device const & device, std::vector> const & bindingData, vk::DescriptorSetLayoutCreateFlags flags ) { @@ -104,14 +98,14 @@ namespace vk std::get<1>( bindingData[i] ), std::get<2>( bindingData[i] ) ); } - return 
device->createDescriptorSetLayoutUnique( vk::DescriptorSetLayoutCreateInfo( flags, bindings ) ); + return device.createDescriptorSetLayout( vk::DescriptorSetLayoutCreateInfo( flags, bindings ) ); } - vk::UniqueDevice createDevice( vk::PhysicalDevice physicalDevice, - uint32_t queueFamilyIndex, - std::vector const & extensions, - vk::PhysicalDeviceFeatures const * physicalDeviceFeatures, - void const * pNext ) + vk::Device createDevice( vk::PhysicalDevice const & physicalDevice, + uint32_t queueFamilyIndex, + std::vector const & extensions, + vk::PhysicalDeviceFeatures const * physicalDeviceFeatures, + void const * pNext ) { std::vector enabledExtensions; enabledExtensions.reserve( extensions.size() ); @@ -120,54 +114,47 @@ namespace vk enabledExtensions.push_back( ext.data() ); } - // create a UniqueDevice float queuePriority = 0.0f; - vk::DeviceQueueCreateInfo deviceQueueCreateInfo( - vk::DeviceQueueCreateFlags(), queueFamilyIndex, 1, &queuePriority ); - vk::DeviceCreateInfo deviceCreateInfo( - vk::DeviceCreateFlags(), deviceQueueCreateInfo, {}, enabledExtensions, physicalDeviceFeatures ); + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( {}, queueFamilyIndex, 1, &queuePriority ); + vk::DeviceCreateInfo deviceCreateInfo( {}, deviceQueueCreateInfo, {}, enabledExtensions, physicalDeviceFeatures ); deviceCreateInfo.pNext = pNext; - return physicalDevice.createDeviceUnique( deviceCreateInfo ); + + return physicalDevice.createDevice( deviceCreateInfo ); } - std::vector createFramebuffers( vk::UniqueDevice & device, - vk::UniqueRenderPass & renderPass, - std::vector const & imageViews, - vk::UniqueImageView const & depthImageView, - vk::Extent2D const & extent ) + std::vector createFramebuffers( vk::Device const & device, + vk::RenderPass & renderPass, + std::vector const & imageViews, + vk::ImageView const & depthImageView, + vk::Extent2D const & extent ) { vk::ImageView attachments[2]; - attachments[1] = depthImageView.get(); + attachments[1] = depthImageView; - 
vk::FramebufferCreateInfo framebufferCreateInfo( vk::FramebufferCreateFlags(), - *renderPass, - depthImageView ? 2 : 1, - attachments, - extent.width, - extent.height, - 1 ); - std::vector framebuffers; + vk::FramebufferCreateInfo framebufferCreateInfo( + vk::FramebufferCreateFlags(), renderPass, depthImageView ? 2 : 1, attachments, extent.width, extent.height, 1 ); + std::vector framebuffers; framebuffers.reserve( imageViews.size() ); for ( auto const & view : imageViews ) { - attachments[0] = view.get(); - framebuffers.push_back( device->createFramebufferUnique( framebufferCreateInfo ) ); + attachments[0] = view; + framebuffers.push_back( device.createFramebuffer( framebufferCreateInfo ) ); } return framebuffers; } - vk::UniquePipeline - createGraphicsPipeline( vk::UniqueDevice const & device, - vk::UniquePipelineCache const & pipelineCache, + vk::Pipeline + createGraphicsPipeline( vk::Device const & device, + vk::PipelineCache const & pipelineCache, std::pair const & vertexShaderData, std::pair const & fragmentShaderData, uint32_t vertexStride, std::vector> const & vertexInputAttributeFormatOffset, vk::FrontFace frontFace, bool depthBuffered, - vk::UniquePipelineLayout const & pipelineLayout, - vk::UniqueRenderPass const & renderPass ) + vk::PipelineLayout const & pipelineLayout, + vk::RenderPass const & renderPass ) { std::array pipelineShaderStageCreateInfos = { vk::PipelineShaderStageCreateInfo( vk::PipelineShaderStageCreateFlags(), @@ -191,8 +178,8 @@ namespace vk vertexInputAttributeDescriptions.reserve( vertexInputAttributeFormatOffset.size() ); for ( uint32_t i = 0; i < vertexInputAttributeFormatOffset.size(); i++ ) { - vertexInputAttributeDescriptions.push_back( vk::VertexInputAttributeDescription( - i, 0, vertexInputAttributeFormatOffset[i].first, vertexInputAttributeFormatOffset[i].second ) ); + vertexInputAttributeDescriptions.emplace_back( + i, 0, vertexInputAttributeFormatOffset[i].first, vertexInputAttributeFormatOffset[i].second ); } 
pipelineVertexInputStateCreateInfo.setVertexBindingDescriptions( vertexInputBindingDescription ); pipelineVertexInputStateCreateInfo.setVertexAttributeDescriptions( vertexInputAttributeDescriptions ); @@ -262,53 +249,21 @@ namespace vk &pipelineDepthStencilStateCreateInfo, &pipelineColorBlendStateCreateInfo, &pipelineDynamicStateCreateInfo, - pipelineLayout.get(), - renderPass.get() ); + pipelineLayout, + renderPass ); - auto result = device->createGraphicsPipelineUnique( pipelineCache.get(), graphicsPipelineCreateInfo ); + auto result = device.createGraphicsPipeline( pipelineCache, graphicsPipelineCreateInfo ); assert( result.result == vk::Result::eSuccess ); - return std::move( result.value ); + return result.value; } - vk::UniqueInstance createInstance( std::string const & appName, - std::string const & engineName, - std::vector const & layers, - std::vector const & extensions, - uint32_t apiVersion ) + std::vector gatherExtensions( std::vector const & extensions +#if !defined( NDEBUG ) + , + std::vector const & extensionProperties +#endif + ) { -#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - static vk::DynamicLoader dl; - PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = - dl.getProcAddress( "vkGetInstanceProcAddr" ); - VULKAN_HPP_DEFAULT_DISPATCHER.init( vkGetInstanceProcAddr ); -#endif - -#if !defined( NDEBUG ) - std::vector layerProperties = vk::enumerateInstanceLayerProperties(); - std::vector extensionProperties = vk::enumerateInstanceExtensionProperties(); -#endif - - std::vector enabledLayers; - enabledLayers.reserve( layers.size() ); - for ( auto const & layer : layers ) - { - assert( - std::find_if( layerProperties.begin(), layerProperties.end(), [layer]( vk::LayerProperties const & lp ) { - return layer == lp.layerName; - } ) != layerProperties.end() ); - enabledLayers.push_back( layer.data() ); - } -#if !defined( NDEBUG ) - // Enable standard validation layer to find as much errors as possible! 
- if ( std::find( layers.begin(), layers.end(), "VK_LAYER_KHRONOS_validation" ) == layers.end() && - std::find_if( layerProperties.begin(), layerProperties.end(), []( vk::LayerProperties const & lp ) { - return ( strcmp( "VK_LAYER_KHRONOS_validation", lp.layerName ) == 0 ); - } ) != layerProperties.end() ) - { - enabledLayers.push_back( "VK_LAYER_KHRONOS_validation" ); - } -#endif - std::vector enabledExtensions; enabledExtensions.reserve( extensions.size() ); for ( auto const & ext : extensions ) @@ -329,9 +284,57 @@ namespace vk enabledExtensions.push_back( VK_EXT_DEBUG_UTILS_EXTENSION_NAME ); } #endif + return enabledExtensions; + } + + std::vector gatherLayers( std::vector const & layers +#if !defined( NDEBUG ) + , + std::vector const & layerProperties +#endif + ) + { + std::vector enabledLayers; + enabledLayers.reserve( layers.size() ); + for ( auto const & layer : layers ) + { + assert( + std::find_if( layerProperties.begin(), layerProperties.end(), [layer]( vk::LayerProperties const & lp ) { + return layer == lp.layerName; + } ) != layerProperties.end() ); + enabledLayers.push_back( layer.data() ); + } +#if !defined( NDEBUG ) + // Enable standard validation layer to find as much errors as possible! 
+ if ( std::find( layers.begin(), layers.end(), "VK_LAYER_KHRONOS_validation" ) == layers.end() && + std::find_if( layerProperties.begin(), layerProperties.end(), []( vk::LayerProperties const & lp ) { + return ( strcmp( "VK_LAYER_KHRONOS_validation", lp.layerName ) == 0 ); + } ) != layerProperties.end() ) + { + enabledLayers.push_back( "VK_LAYER_KHRONOS_validation" ); + } +#endif + return enabledLayers; + } + + vk::Instance createInstance( std::string const & appName, + std::string const & engineName, + std::vector const & layers, + std::vector const & extensions, + uint32_t apiVersion ) + { +#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + static vk::DynamicLoader dl; + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = + dl.getProcAddress( "vkGetInstanceProcAddr" ); + VULKAN_HPP_DEFAULT_DISPATCHER.init( vkGetInstanceProcAddr ); +#endif + + vk::ApplicationInfo applicationInfo( appName.c_str(), 1, engineName.c_str(), 1, apiVersion ); + std::vector enabledLayers = vk::su::gatherLayers( layers, vk::enumerateInstanceLayerProperties() ); + std::vector enabledExtensions = + vk::su::gatherExtensions( extensions, vk::enumerateInstanceExtensionProperties() ); - // create a UniqueInstance - vk::ApplicationInfo applicationInfo( appName.c_str(), 1, engineName.c_str(), 1, apiVersion ); #if defined( NDEBUG ) // in non-debug mode just use the InstanceCreateInfo for instance creation vk::StructureChain instanceCreateInfo( @@ -355,45 +358,47 @@ namespace vk { {}, severityFlags, messageTypeFlags, &vk::su::debugUtilsMessengerCallback } ); # endif #endif - vk::UniqueInstance instance = vk::createInstanceUnique( instanceCreateInfo.get() ); + + vk::Instance instance = + vk::createInstance( makeInstanceCreateInfoChain( applicationInfo, enabledLayers, enabledExtensions ) + .get() ); #if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) // initialize function pointers for instance - VULKAN_HPP_DEFAULT_DISPATCHER.init( *instance ); + VULKAN_HPP_DEFAULT_DISPATCHER.init( instance ); #endif return 
instance; } - vk::UniqueRenderPass createRenderPass( vk::UniqueDevice & device, - vk::Format colorFormat, - vk::Format depthFormat, - vk::AttachmentLoadOp loadOp, - vk::ImageLayout colorFinalLayout ) + vk::RenderPass createRenderPass( vk::Device const & device, + vk::Format colorFormat, + vk::Format depthFormat, + vk::AttachmentLoadOp loadOp, + vk::ImageLayout colorFinalLayout ) { std::vector attachmentDescriptions; assert( colorFormat != vk::Format::eUndefined ); - attachmentDescriptions.push_back( vk::AttachmentDescription( vk::AttachmentDescriptionFlags(), - colorFormat, - vk::SampleCountFlagBits::e1, - loadOp, - vk::AttachmentStoreOp::eStore, - vk::AttachmentLoadOp::eDontCare, - vk::AttachmentStoreOp::eDontCare, - vk::ImageLayout::eUndefined, - colorFinalLayout ) ); + attachmentDescriptions.emplace_back( vk::AttachmentDescriptionFlags(), + colorFormat, + vk::SampleCountFlagBits::e1, + loadOp, + vk::AttachmentStoreOp::eStore, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + colorFinalLayout ); if ( depthFormat != vk::Format::eUndefined ) { - attachmentDescriptions.push_back( - vk::AttachmentDescription( vk::AttachmentDescriptionFlags(), - depthFormat, - vk::SampleCountFlagBits::e1, - loadOp, - vk::AttachmentStoreOp::eDontCare, - vk::AttachmentLoadOp::eDontCare, - vk::AttachmentStoreOp::eDontCare, - vk::ImageLayout::eUndefined, - vk::ImageLayout::eDepthStencilAttachmentOptimal ) ); + attachmentDescriptions.emplace_back( vk::AttachmentDescriptionFlags(), + depthFormat, + vk::SampleCountFlagBits::e1, + loadOp, + vk::AttachmentStoreOp::eDontCare, + vk::AttachmentLoadOp::eDontCare, + vk::AttachmentStoreOp::eDontCare, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eDepthStencilAttachmentOptimal ); } vk::AttachmentReference colorAttachment( 0, vk::ImageLayout::eColorAttachmentOptimal ); vk::AttachmentReference depthAttachment( 1, vk::ImageLayout::eDepthStencilAttachmentOptimal ); @@ -404,7 +409,7 @@ namespace vk 
{}, ( depthFormat != vk::Format::eUndefined ) ? &depthAttachment : nullptr ); - return device->createRenderPassUnique( + return device.createRenderPass( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachmentDescriptions, subpassDescription ) ); } @@ -414,7 +419,7 @@ namespace vk VkDebugUtilsMessengerCallbackDataEXT const * pCallbackData, void * /*pUserData*/ ) { -#if !defined(NDEBUG) +#if !defined( NDEBUG ) if ( pCallbackData->messageIdNumber == 648835635 ) { // UNASSIGNED-khronos-Validation-debug-build-warning-message @@ -481,15 +486,12 @@ namespace vk uint32_t findGraphicsQueueFamilyIndex( std::vector const & queueFamilyProperties ) { // get the first index into queueFamiliyProperties which supports graphics - size_t graphicsQueueFamilyIndex = std::distance( - queueFamilyProperties.begin(), - std::find_if( - queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { - return qfp.queueFlags & vk::QueueFlagBits::eGraphics; - } ) ); - assert( graphicsQueueFamilyIndex < queueFamilyProperties.size() ); - - return checked_cast( graphicsQueueFamilyIndex ); + std::vector::const_iterator graphicsQueueFamilyProperty = std::find_if( + queueFamilyProperties.begin(), queueFamilyProperties.end(), []( vk::QueueFamilyProperties const & qfp ) { + return qfp.queueFlags & vk::QueueFlagBits::eGraphics; + } ); + assert( graphicsQueueFamilyProperty != queueFamilyProperties.end() ); + return static_cast( std::distance( queueFamilyProperties.begin(), graphicsQueueFamilyProperty ) ); } std::pair findGraphicsAndPresentQueueFamilyIndex( vk::PhysicalDevice physicalDevice, @@ -655,11 +657,11 @@ namespace vk return pickedFormat; } - void setImageLayout( vk::UniqueCommandBuffer const & commandBuffer, - vk::Image image, - vk::Format format, - vk::ImageLayout oldImageLayout, - vk::ImageLayout newImageLayout ) + void setImageLayout( vk::CommandBuffer const & commandBuffer, + vk::Image image, + vk::Format format, + vk::ImageLayout 
oldImageLayout, + vk::ImageLayout newImageLayout ) { vk::AccessFlags sourceAccessMask; switch ( oldImageLayout ) @@ -741,24 +743,24 @@ namespace vk VK_QUEUE_FAMILY_IGNORED, image, imageSubresourceRange ); - return commandBuffer->pipelineBarrier( sourceStage, destinationStage, {}, nullptr, nullptr, imageMemoryBarrier ); + return commandBuffer.pipelineBarrier( sourceStage, destinationStage, {}, nullptr, nullptr, imageMemoryBarrier ); } - void submitAndWait( vk::UniqueDevice & device, vk::Queue queue, vk::UniqueCommandBuffer & commandBuffer ) + void submitAndWait( vk::Device const & device, vk::Queue const & queue, vk::CommandBuffer const & commandBuffer ) { - vk::UniqueFence fence = device->createFenceUnique( vk::FenceCreateInfo() ); - queue.submit( vk::SubmitInfo( {}, {}, *commandBuffer ), fence.get() ); - while ( vk::Result::eTimeout == device->waitForFences( fence.get(), VK_TRUE, vk::su::FenceTimeout ) ) + vk::Fence fence = device.createFence( vk::FenceCreateInfo() ); + queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &commandBuffer ), fence ); + while ( vk::Result::eTimeout == device.waitForFences( fence, VK_TRUE, vk::su::FenceTimeout ) ) ; + device.destroyFence( fence ); } void updateDescriptorSets( - vk::UniqueDevice const & device, - vk::UniqueDescriptorSet const & descriptorSet, - std::vector> const & - bufferData, - vk::su::TextureData const & textureData, - uint32_t bindingOffset ) + vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + vk::su::TextureData const & textureData, + uint32_t bindingOffset ) { std::vector bufferInfos; bufferInfos.reserve( bufferData.size() ); @@ -768,32 +770,25 @@ namespace vk uint32_t dstBinding = bindingOffset; for ( auto const & bd : bufferData ) { - bufferInfos.push_back( vk::DescriptorBufferInfo( *std::get<1>( bd ), 0, VK_WHOLE_SIZE ) ); - writeDescriptorSets.push_back( vk::WriteDescriptorSet( *descriptorSet, - dstBinding++, - 0, - 1, - std::get<0>( bd ), - nullptr, 
- &bufferInfos.back(), - std::get<2>( bd ) ? &*std::get<2>( bd ) : nullptr ) ); + bufferInfos.emplace_back( std::get<1>( bd ), 0, VK_WHOLE_SIZE ); + writeDescriptorSets.emplace_back( + descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), &std::get<2>( bd ) ); } vk::DescriptorImageInfo imageInfo( - *textureData.textureSampler, *textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); - writeDescriptorSets.push_back( vk::WriteDescriptorSet( - *descriptorSet, dstBinding, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo, {}, nullptr ) ); + textureData.sampler, textureData.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + writeDescriptorSets.emplace_back( + descriptorSet, dstBinding, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo, nullptr, nullptr ); - device->updateDescriptorSets( writeDescriptorSets, nullptr ); + device.updateDescriptorSets( writeDescriptorSets, nullptr ); } void updateDescriptorSets( - vk::UniqueDevice const & device, - vk::UniqueDescriptorSet const & descriptorSet, - std::vector> const & - bufferData, - std::vector const & textureData, - uint32_t bindingOffset ) + vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + std::vector const & textureData, + uint32_t bindingOffset ) { std::vector bufferInfos; bufferInfos.reserve( bufferData.size() ); @@ -803,15 +798,9 @@ namespace vk uint32_t dstBinding = bindingOffset; for ( auto const & bd : bufferData ) { - bufferInfos.push_back( vk::DescriptorBufferInfo( *std::get<1>( bd ), 0, VK_WHOLE_SIZE ) ); - writeDescriptorSets.push_back( vk::WriteDescriptorSet( *descriptorSet, - dstBinding++, - 0, - 1, - std::get<0>( bd ), - nullptr, - &bufferInfos.back(), - std::get<2>( bd ) ? 
&*std::get<2>( bd ) : nullptr ) ); + bufferInfos.emplace_back( std::get<1>( bd ), 0, VK_WHOLE_SIZE ); + writeDescriptorSets.emplace_back( + descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), &std::get<2>( bd ) ); } std::vector imageInfos; @@ -820,24 +809,23 @@ namespace vk imageInfos.reserve( textureData.size() ); for ( auto const & td : textureData ) { - imageInfos.push_back( vk::DescriptorImageInfo( - *td.textureSampler, *td.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ) ); + imageInfos.emplace_back( td.sampler, td.imageData->imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); } - writeDescriptorSets.push_back( vk::WriteDescriptorSet( *descriptorSet, - dstBinding, - 0, - checked_cast( imageInfos.size() ), - vk::DescriptorType::eCombinedImageSampler, - imageInfos.data(), - nullptr, - nullptr ) ); + writeDescriptorSets.emplace_back( descriptorSet, + dstBinding, + 0, + checked_cast( imageInfos.size() ), + vk::DescriptorType::eCombinedImageSampler, + imageInfos.data(), + nullptr, + nullptr ); } - device->updateDescriptorSets( writeDescriptorSets, nullptr ); + device.updateDescriptorSets( writeDescriptorSets, nullptr ); } BufferData::BufferData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, + vk::Device const & device, vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags propertyFlags ) @@ -845,18 +833,16 @@ namespace vk : m_size( size ), m_usage( usage ), m_propertyFlags( propertyFlags ) #endif { - buffer = device->createBufferUnique( vk::BufferCreateInfo( vk::BufferCreateFlags(), size, usage ) ); - deviceMemory = vk::su::allocateMemory( device, - physicalDevice.getMemoryProperties(), - device->getBufferMemoryRequirements( buffer.get() ), - propertyFlags ); - device->bindBufferMemory( buffer.get(), deviceMemory.get(), 0 ); + buffer = device.createBuffer( vk::BufferCreateInfo( vk::BufferCreateFlags(), size, usage ) ); + deviceMemory = vk::su::allocateDeviceMemory( + 
device, physicalDevice.getMemoryProperties(), device.getBufferMemoryRequirements( buffer ), propertyFlags ); + device.bindBufferMemory( buffer, deviceMemory, 0 ); } - DepthBufferData::DepthBufferData( vk::PhysicalDevice & physicalDevice, - vk::UniqueDevice & device, - vk::Format format, - vk::Extent2D const & extent ) + DepthBufferData::DepthBufferData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::Format format, + vk::Extent2D const & extent ) : ImageData( physicalDevice, device, format, @@ -869,7 +855,7 @@ namespace vk {} ImageData::ImageData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, + vk::Device const & device, vk::Format format_, vk::Extent2D const & extent, vk::ImageTiling tiling, @@ -891,47 +877,41 @@ namespace vk vk::SharingMode::eExclusive, {}, initialLayout ); - image = device->createImageUnique( imageCreateInfo ); + image = device.createImage( imageCreateInfo ); - deviceMemory = vk::su::allocateMemory( device, - physicalDevice.getMemoryProperties(), - device->getImageMemoryRequirements( image.get() ), - memoryProperties ); + deviceMemory = vk::su::allocateDeviceMemory( + device, physicalDevice.getMemoryProperties(), device.getImageMemoryRequirements( image ), memoryProperties ); - device->bindImageMemory( image.get(), deviceMemory.get(), 0 ); + device.bindImageMemory( image, deviceMemory, 0 ); vk::ComponentMapping componentMapping( ComponentSwizzle::eR, ComponentSwizzle::eG, ComponentSwizzle::eB, ComponentSwizzle::eA ); - vk::ImageViewCreateInfo imageViewCreateInfo( vk::ImageViewCreateFlags(), - image.get(), - vk::ImageViewType::e2D, - format, - componentMapping, - vk::ImageSubresourceRange( aspectMask, 0, 1, 0, 1 ) ); - imageView = device->createImageViewUnique( imageViewCreateInfo ); + vk::ImageSubresourceRange imageSubresourceRange( aspectMask, 0, 1, 0, 1 ); + vk::ImageViewCreateInfo imageViewCreateInfo( + {}, image, vk::ImageViewType::e2D, format, componentMapping, 
imageSubresourceRange ); + imageView = device.createImageView( imageViewCreateInfo ); } - SurfaceData::SurfaceData( vk::UniqueInstance & instance, + SurfaceData::SurfaceData( vk::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent_ ) : extent( extent_ ), window( vk::su::createWindow( windowName, extent ) ) { VkSurfaceKHR _surface; - VkResult err = glfwCreateWindowSurface( VkInstance( instance.get() ), window.handle, nullptr, &_surface ); + VkResult err = glfwCreateWindowSurface( static_cast( instance ), window.handle, nullptr, &_surface ); if ( err != VK_SUCCESS ) throw std::runtime_error( "Failed to create window!" ); - vk::ObjectDestroy _deleter( instance.get() ); - surface = vk::UniqueSurfaceKHR( vk::SurfaceKHR( _surface ), _deleter ); + surface = vk::SurfaceKHR( _surface ); } - SwapChainData::SwapChainData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, - vk::SurfaceKHR const & surface, - vk::Extent2D const & extent, - vk::ImageUsageFlags usage, - vk::UniqueSwapchainKHR const & oldSwapChain, - uint32_t graphicsQueueFamilyIndex, - uint32_t presentQueueFamilyIndex ) + SwapChainData::SwapChainData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::SurfaceKHR const & surface, + vk::Extent2D const & extent, + vk::ImageUsageFlags usage, + vk::SwapchainKHR const & oldSwapChain, + uint32_t graphicsQueueFamilyIndex, + uint32_t presentQueueFamilyIndex ) { vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surface ) ); colorFormat = surfaceFormat.format; @@ -978,7 +958,7 @@ namespace vk compositeAlpha, presentMode, true, - *oldSwapChain ); + oldSwapChain ); if ( graphicsQueueFamilyIndex != presentQueueFamilyIndex ) { uint32_t queueFamilyIndices[2] = { graphicsQueueFamilyIndex, presentQueueFamilyIndex }; @@ -989,9 +969,9 @@ namespace vk swapChainCreateInfo.queueFamilyIndexCount = 2; swapChainCreateInfo.pQueueFamilyIndices = 
queueFamilyIndices; } - swapChain = device->createSwapchainKHRUnique( swapChainCreateInfo ); + swapChain = device.createSwapchainKHR( swapChainCreateInfo ); - images = device->getSwapchainImagesKHR( swapChain.get() ); + images = device.getSwapchainImagesKHR( swapChain ); imageViews.reserve( images.size() ); vk::ComponentMapping componentMapping( @@ -1001,7 +981,7 @@ namespace vk { vk::ImageViewCreateInfo imageViewCreateInfo( vk::ImageViewCreateFlags(), image, vk::ImageViewType::e2D, colorFormat, componentMapping, subResourceRange ); - imageViews.push_back( device->createImageViewUnique( imageViewCreateInfo ) ); + imageViews.push_back( device.createImageView( imageViewCreateInfo ) ); } } @@ -1062,7 +1042,7 @@ namespace vk } TextureData::TextureData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, + vk::Device const & device, vk::Extent2D const & extent_, vk::ImageUsageFlags usageFlags, vk::FormatFeatureFlags formatFeatureFlags, @@ -1103,21 +1083,21 @@ namespace vk requirements, vk::ImageAspectFlagBits::eColor ) ); - textureSampler = device->createSamplerUnique( vk::SamplerCreateInfo( vk::SamplerCreateFlags(), - vk::Filter::eLinear, - vk::Filter::eLinear, - vk::SamplerMipmapMode::eLinear, - vk::SamplerAddressMode::eRepeat, - vk::SamplerAddressMode::eRepeat, - vk::SamplerAddressMode::eRepeat, - 0.0f, - anisotropyEnable, - 16.0f, - false, - vk::CompareOp::eNever, - 0.0f, - 0.0f, - vk::BorderColor::eFloatOpaqueBlack ) ); + sampler = device.createSampler( vk::SamplerCreateInfo( vk::SamplerCreateFlags(), + vk::Filter::eLinear, + vk::Filter::eLinear, + vk::SamplerMipmapMode::eLinear, + vk::SamplerAddressMode::eRepeat, + vk::SamplerAddressMode::eRepeat, + vk::SamplerAddressMode::eRepeat, + 0.0f, + anisotropyEnable, + 16.0f, + false, + vk::CompareOp::eNever, + 0.0f, + 0.0f, + vk::BorderColor::eFloatOpaqueBlack ) ); } UUID::UUID( uint8_t const data[VK_UUID_SIZE] ) @@ -1167,6 +1147,43 @@ namespace vk GLFWwindow * window = glfwCreateWindow( 
extent.width, extent.height, windowName.c_str(), nullptr, nullptr );
      return WindowData( window, windowName, extent );
    }
+
+    vk::DebugUtilsMessengerCreateInfoEXT makeDebugUtilsMessengerCreateInfoEXT()
+    {
+      return { {},
+               vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | vk::DebugUtilsMessageSeverityFlagBitsEXT::eError,
+               vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance |
+                 vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation,
+               &vk::su::debugUtilsMessengerCallback };
+    }
+
+#if defined( NDEBUG )
+    vk::StructureChain<vk::InstanceCreateInfo>
+#else
+    vk::StructureChain<vk::InstanceCreateInfo, vk::DebugUtilsMessengerCreateInfoEXT>
+#endif
+      makeInstanceCreateInfoChain( vk::ApplicationInfo const & applicationInfo,
+                                   std::vector<char const *> const & layers,
+                                   std::vector<char const *> const & extensions )
+    {
+#if defined( NDEBUG )
+      // in non-debug mode just use the InstanceCreateInfo for instance creation
+      vk::StructureChain<vk::InstanceCreateInfo> instanceCreateInfo(
+        { {}, &applicationInfo, layers, extensions } );
+#else
+      // in debug mode, additionally use the debugUtilsMessengerCallback in instance creation!
+ vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eError ); + vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | + vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation ); + vk::StructureChain instanceCreateInfo( + { {}, &applicationInfo, layers, extensions }, + { {}, severityFlags, messageTypeFlags, &vk::su::debugUtilsMessengerCallback } ); +#endif + return instanceCreateInfo; + } + } // namespace su } // namespace vk diff --git a/samples/utils/utils.hpp b/samples/utils/utils.hpp index fee32cb..9dd0df8 100644 --- a/samples/utils/utils.hpp +++ b/samples/utils/utils.hpp @@ -29,38 +29,37 @@ namespace vk const uint64_t FenceTimeout = 100000000; template - void oneTimeSubmit( vk::UniqueCommandBuffer const & commandBuffer, vk::Queue const & queue, Func const & func ) + void oneTimeSubmit( vk::CommandBuffer const & commandBuffer, vk::Queue const & queue, Func const & func ) { - commandBuffer->begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); + commandBuffer.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) ); func( commandBuffer ); - commandBuffer->end(); - queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &( *commandBuffer ) ), nullptr ); + commandBuffer.end(); + queue.submit( vk::SubmitInfo( 0, nullptr, nullptr, 1, &commandBuffer ), nullptr ); queue.waitIdle(); } template - void oneTimeSubmit( vk::UniqueDevice const & device, - vk::UniqueCommandPool const & commandPool, - vk::Queue const & queue, - Func const & func ) + void oneTimeSubmit( vk::Device const & device, + vk::CommandPool const & commandPool, + vk::Queue const & queue, + Func const & func ) { - vk::UniqueCommandBuffer commandBuffer = - std::move( device - ->allocateCommandBuffersUnique( - vk::CommandBufferAllocateInfo( *commandPool, 
vk::CommandBufferLevel::ePrimary, 1 ) ) - .front() ); + vk::CommandBuffer commandBuffer = + device + .allocateCommandBuffers( vk::CommandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ) + .front(); oneTimeSubmit( commandBuffer, queue, func ); } template - void copyToDevice( vk::UniqueDevice const & device, - vk::UniqueDeviceMemory const & memory, - T const * pData, - size_t count, - vk::DeviceSize stride = sizeof( T ) ) + void copyToDevice( vk::Device const & device, + vk::DeviceMemory const & deviceMemory, + T const * pData, + size_t count, + vk::DeviceSize stride = sizeof( T ) ) { assert( sizeof( T ) <= stride ); - uint8_t * deviceData = static_cast( device->mapMemory( memory.get(), 0, count * stride ) ); + uint8_t * deviceData = static_cast( device.mapMemory( deviceMemory, 0, count * stride ) ); if ( stride == sizeof( T ) ) { memcpy( deviceData, pData, count * sizeof( T ) ); @@ -73,13 +72,13 @@ namespace vk deviceData += stride; } } - device->unmapMemory( memory.get() ); + device.unmapMemory( deviceMemory ); } template - void copyToDevice( vk::UniqueDevice const & device, vk::UniqueDeviceMemory const & memory, T const & data ) + void copyToDevice( vk::Device const & device, vk::DeviceMemory const & deviceMemory, T const & data ) { - copyToDevice( device, memory, &data, 1 ); + copyToDevice( device, deviceMemory, &data, 1 ); } template @@ -88,11 +87,11 @@ namespace vk return v < lo ? lo : hi < v ? 
hi : v; } - void setImageLayout( vk::UniqueCommandBuffer const & commandBuffer, - vk::Image image, - vk::Format format, - vk::ImageLayout oldImageLayout, - vk::ImageLayout newImageLayout ); + void setImageLayout( vk::CommandBuffer const & commandBuffer, + vk::Image image, + vk::Format format, + vk::ImageLayout oldImageLayout, + vk::ImageLayout newImageLayout ); struct WindowData { @@ -111,26 +110,32 @@ namespace vk struct BufferData { BufferData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, + vk::Device const & device, vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags propertyFlags = vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent ); + void clear( vk::Device const & device ) + { + device.freeMemory( deviceMemory ); + device.destroyBuffer( buffer ); + } + template - void upload( vk::UniqueDevice const & device, DataType const & data ) const + void upload( vk::Device const & device, DataType const & data ) const { assert( ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostCoherent ) && ( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ) ); assert( sizeof( DataType ) <= m_size ); - void * dataPtr = device->mapMemory( *this->deviceMemory, 0, sizeof( DataType ) ); + void * dataPtr = device.mapMemory( deviceMemory, 0, sizeof( DataType ) ); memcpy( dataPtr, &data, sizeof( DataType ) ); - device->unmapMemory( *this->deviceMemory ); + device.unmapMemory( deviceMemory ); } template - void upload( vk::UniqueDevice const & device, std::vector const & data, size_t stride = 0 ) const + void upload( vk::Device const & device, std::vector const & data, size_t stride = 0 ) const { assert( m_propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ); @@ -142,8 +147,8 @@ namespace vk template void upload( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, - vk::UniqueCommandPool const & commandPool, + vk::Device const & device, + vk::CommandPool const & 
commandPool, vk::Queue queue, std::vector const & data, size_t stride ) const @@ -160,13 +165,15 @@ namespace vk vk::su::BufferData stagingBuffer( physicalDevice, device, dataSize, vk::BufferUsageFlagBits::eTransferSrc ); copyToDevice( device, stagingBuffer.deviceMemory, data.data(), data.size(), elementSize ); - vk::su::oneTimeSubmit( device, commandPool, queue, [&]( vk::UniqueCommandBuffer const & commandBuffer ) { - commandBuffer->copyBuffer( *stagingBuffer.buffer, *this->buffer, vk::BufferCopy( 0, 0, dataSize ) ); + vk::su::oneTimeSubmit( device, commandPool, queue, [&]( vk::CommandBuffer const & commandBuffer ) { + commandBuffer.copyBuffer( stagingBuffer.buffer, buffer, vk::BufferCopy( 0, 0, dataSize ) ); } ); + + stagingBuffer.clear( device ); } - vk::UniqueBuffer buffer; - vk::UniqueDeviceMemory deviceMemory; + vk::Buffer buffer; + vk::DeviceMemory deviceMemory; #if !defined( NDEBUG ) private: vk::DeviceSize m_size; @@ -178,7 +185,7 @@ namespace vk struct ImageData { ImageData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, + vk::Device const & device, vk::Format format, vk::Extent2D const & extent, vk::ImageTiling tiling, @@ -187,44 +194,62 @@ namespace vk vk::MemoryPropertyFlags memoryProperties, vk::ImageAspectFlags aspectMask ); - vk::Format format; - vk::UniqueImage image; - vk::UniqueDeviceMemory deviceMemory; - vk::UniqueImageView imageView; + void clear( vk::Device const & device ) + { + device.destroyImageView( imageView ); + device.freeMemory( deviceMemory ); + device.destroyImage( image ); + } + + vk::Format format; + vk::Image image; + vk::DeviceMemory deviceMemory; + vk::ImageView imageView; }; struct DepthBufferData : public ImageData { - DepthBufferData( vk::PhysicalDevice & physicalDevice, - vk::UniqueDevice & device, - vk::Format format, - vk::Extent2D const & extent ); + DepthBufferData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::Format format, + vk::Extent2D const & extent 
); }; struct SurfaceData { - SurfaceData( vk::UniqueInstance & instance, std::string const & windowName, vk::Extent2D const & extent ); + SurfaceData( vk::Instance const & instance, std::string const & windowName, vk::Extent2D const & extent ); - vk::Extent2D extent; - WindowData window; - vk::UniqueSurfaceKHR surface; + vk::Extent2D extent; + WindowData window; + vk::SurfaceKHR surface; }; struct SwapChainData { - SwapChainData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, - vk::SurfaceKHR const & surface, - vk::Extent2D const & extent, - vk::ImageUsageFlags usage, - vk::UniqueSwapchainKHR const & oldSwapChain, - uint32_t graphicsFamilyIndex, - uint32_t presentFamilyIndex ); + SwapChainData( vk::PhysicalDevice const & physicalDevice, + vk::Device const & device, + vk::SurfaceKHR const & surface, + vk::Extent2D const & extent, + vk::ImageUsageFlags usage, + vk::SwapchainKHR const & oldSwapChain, + uint32_t graphicsFamilyIndex, + uint32_t presentFamilyIndex ); - vk::Format colorFormat; - vk::UniqueSwapchainKHR swapChain; - std::vector images; - std::vector imageViews; + void clear( vk::Device const & device ) + { + for ( auto & imageView : imageViews ) + { + device.destroyImageView( imageView ); + } + imageViews.clear(); + images.clear(); + device.destroySwapchainKHR( swapChain ); + } + + vk::Format colorFormat; + vk::SwapchainKHR swapChain; + std::vector images; + std::vector imageViews; }; class CheckerboardImageGenerator @@ -267,33 +292,42 @@ namespace vk struct TextureData { TextureData( vk::PhysicalDevice const & physicalDevice, - vk::UniqueDevice const & device, + vk::Device const & device, vk::Extent2D const & extent_ = { 256, 256 }, vk::ImageUsageFlags usageFlags = {}, vk::FormatFeatureFlags formatFeatureFlags = {}, bool anisotropyEnable = false, bool forceStaging = false ); - template - void setImage( vk::UniqueDevice const & device, - vk::UniqueCommandBuffer const & commandBuffer, - ImageGenerator const & imageGenerator ) + 
void clear( vk::Device const & device ) { - void * data = - needsStaging - ? device->mapMemory( stagingBufferData->deviceMemory.get(), - 0, - device->getBufferMemoryRequirements( stagingBufferData->buffer.get() ).size ) - : device->mapMemory( - imageData->deviceMemory.get(), 0, device->getImageMemoryRequirements( imageData->image.get() ).size ); + if ( stagingBufferData ) + { + stagingBufferData->clear( device ); + } + imageData->clear( device ); + device.destroySampler( sampler ); + } + + template + void setImage( vk::Device const & device, + vk::CommandBuffer const & commandBuffer, + ImageGenerator const & imageGenerator ) + { + void * data = needsStaging + ? device.mapMemory( stagingBufferData->deviceMemory, + 0, + device.getBufferMemoryRequirements( stagingBufferData->buffer ).size ) + : device.mapMemory( + imageData->deviceMemory, 0, device.getImageMemoryRequirements( imageData->image ).size ); imageGenerator( data, extent ); - device->unmapMemory( needsStaging ? stagingBufferData->deviceMemory.get() : imageData->deviceMemory.get() ); + device.unmapMemory( needsStaging ? 
stagingBufferData->deviceMemory : imageData->deviceMemory ); if ( needsStaging ) { // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal vk::su::setImageLayout( commandBuffer, - imageData->image.get(), + imageData->image, imageData->format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); @@ -303,11 +337,11 @@ namespace vk vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), vk::Offset3D( 0, 0, 0 ), vk::Extent3D( extent, 1 ) ); - commandBuffer->copyBufferToImage( - stagingBufferData->buffer.get(), imageData->image.get(), vk::ImageLayout::eTransferDstOptimal, copyRegion ); + commandBuffer.copyBufferToImage( + stagingBufferData->buffer, imageData->image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); // Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY vk::su::setImageLayout( commandBuffer, - imageData->image.get(), + imageData->image, imageData->format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); @@ -316,7 +350,7 @@ namespace vk { // If we can use the linear tiled image as a texture, just do it vk::su::setImageLayout( commandBuffer, - imageData->image.get(), + imageData->image, imageData->format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); @@ -328,7 +362,7 @@ namespace vk bool needsStaging; std::unique_ptr stagingBufferData; std::unique_ptr imageData; - vk::UniqueSampler textureSampler; + vk::Sampler sampler; }; struct UUID @@ -351,51 +385,51 @@ namespace vk return static_cast( value ); } - vk::UniqueDeviceMemory allocateMemory( vk::UniqueDevice const & device, - vk::PhysicalDeviceMemoryProperties const & memoryProperties, - vk::MemoryRequirements const & memoryRequirements, - vk::MemoryPropertyFlags memoryPropertyFlags ); - bool contains( std::vector const & extensionProperties, - std::string const & extensionName ); - vk::UniqueCommandPool createCommandPool( vk::UniqueDevice & device, 
uint32_t queueFamilyIndex ); - vk::UniqueDebugUtilsMessengerEXT createDebugUtilsMessenger( vk::UniqueInstance & instance ); - vk::UniqueDescriptorPool createDescriptorPool( vk::UniqueDevice & device, - std::vector const & poolSizes ); - vk::UniqueDescriptorSetLayout createDescriptorSetLayout( - vk::UniqueDevice const & device, + vk::DeviceMemory allocateDeviceMemory( vk::Device const & device, + vk::PhysicalDeviceMemoryProperties const & memoryProperties, + vk::MemoryRequirements const & memoryRequirements, + vk::MemoryPropertyFlags memoryPropertyFlags ); + bool contains( std::vector const & extensionProperties, + std::string const & extensionName ); + vk::CommandPool createCommandPool( vk::Device const & device, uint32_t queueFamilyIndex ); + vk::DebugUtilsMessengerEXT createDebugUtilsMessengerEXT( vk::Instance const & instance ); + vk::DescriptorPool createDescriptorPool( vk::Device const & device, + std::vector const & poolSizes ); + vk::DescriptorSetLayout createDescriptorSetLayout( + vk::Device const & device, std::vector> const & bindingData, vk::DescriptorSetLayoutCreateFlags flags = {} ); - vk::UniqueDevice createDevice( vk::PhysicalDevice physicalDevice, - uint32_t queueFamilyIndex, - std::vector const & extensions = {}, - vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr, - void const * pNext = nullptr ); - std::vector createFramebuffers( vk::UniqueDevice & device, - vk::UniqueRenderPass & renderPass, - std::vector const & imageViews, - vk::UniqueImageView const & depthImageView, - vk::Extent2D const & extent ); - vk::UniquePipeline - createGraphicsPipeline( vk::UniqueDevice const & device, - vk::UniquePipelineCache const & pipelineCache, - std::pair const & vertexShaderData, - std::pair const & fragmentShaderData, - uint32_t vertexStride, - std::vector> const & vertexInputAttributeFormatOffset, - vk::FrontFace frontFace, - bool depthBuffered, - vk::UniquePipelineLayout const & pipelineLayout, - vk::UniqueRenderPass const & renderPass ); 
- vk::UniqueInstance createInstance( std::string const & appName, - std::string const & engineName, - std::vector const & layers = {}, - std::vector const & extensions = {}, - uint32_t apiVersion = VK_API_VERSION_1_0 ); - vk::UniqueRenderPass createRenderPass( vk::UniqueDevice & device, - vk::Format colorFormat, - vk::Format depthFormat, - vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear, - vk::ImageLayout colorFinalLayout = vk::ImageLayout::ePresentSrcKHR ); + vk::Device createDevice( vk::PhysicalDevice const & physicalDevice, + uint32_t queueFamilyIndex, + std::vector const & extensions = {}, + vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr, + void const * pNext = nullptr ); + std::vector createFramebuffers( vk::Device const & device, + vk::RenderPass & renderPass, + std::vector const & imageViews, + vk::ImageView const & depthImageView, + vk::Extent2D const & extent ); + vk::Pipeline + createGraphicsPipeline( vk::Device const & device, + vk::PipelineCache const & pipelineCache, + std::pair const & vertexShaderData, + std::pair const & fragmentShaderData, + uint32_t vertexStride, + std::vector> const & vertexInputAttributeFormatOffset, + vk::FrontFace frontFace, + bool depthBuffered, + vk::PipelineLayout const & pipelineLayout, + vk::RenderPass const & renderPass ); + vk::Instance createInstance( std::string const & appName, + std::string const & engineName, + std::vector const & layers = {}, + std::vector const & extensions = {}, + uint32_t apiVersion = VK_API_VERSION_1_0 ); + vk::RenderPass createRenderPass( vk::Device const & device, + vk::Format colorFormat, + vk::Format depthFormat, + vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear, + vk::ImageLayout colorFinalLayout = vk::ImageLayout::ePresentSrcKHR ); VKAPI_ATTR VkBool32 VKAPI_CALL debugUtilsMessengerCallback( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, @@ -407,26 +441,45 @@ namespace vk uint32_t 
findMemoryType( vk::PhysicalDeviceMemoryProperties const & memoryProperties, uint32_t typeBits, vk::MemoryPropertyFlags requirementsMask ); - std::vector getDeviceExtensions(); - std::vector getInstanceExtensions(); - vk::Format pickDepthFormat( vk::PhysicalDevice const & physicalDevice ); - vk::PresentModeKHR pickPresentMode( std::vector const & presentModes ); - vk::SurfaceFormatKHR pickSurfaceFormat( std::vector const & formats ); - void submitAndWait( vk::UniqueDevice & device, vk::Queue queue, vk::UniqueCommandBuffer & commandBuffer ); + std::vector gatherExtensions( std::vector const & extensions +#if !defined( NDEBUG ) + , + std::vector const & extensionProperties +#endif + ); + std::vector gatherLayers( std::vector const & layers +#if !defined( NDEBUG ) + , + std::vector const & layerProperties +#endif + ); + std::vector getDeviceExtensions(); + std::vector getInstanceExtensions(); + vk::DebugUtilsMessengerCreateInfoEXT makeDebugUtilsMessengerCreateInfoEXT(); +#if defined( NDEBUG ) + vk::StructureChain +#else + vk::StructureChain +#endif + makeInstanceCreateInfoChain( vk::ApplicationInfo const & applicationInfo, + std::vector const & layers, + std::vector const & extensions ); + vk::Format pickDepthFormat( vk::PhysicalDevice const & physicalDevice ); + vk::PresentModeKHR pickPresentMode( std::vector const & presentModes ); + vk::SurfaceFormatKHR pickSurfaceFormat( std::vector const & formats ); + void submitAndWait( vk::Device const & device, vk::Queue const & queue, vk::CommandBuffer const & commandBuffer ); void updateDescriptorSets( - vk::UniqueDevice const & device, - vk::UniqueDescriptorSet const & descriptorSet, - std::vector> const & - bufferData, - vk::su::TextureData const & textureData, - uint32_t bindingOffset = 0 ); + vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + vk::su::TextureData const & textureData, + uint32_t bindingOffset = 0 ); void updateDescriptorSets( - vk::UniqueDevice const 
& device, - vk::UniqueDescriptorSet const & descriptorSet, - std::vector> const & - bufferData, - std::vector const & textureData, - uint32_t bindingOffset = 0 ); + vk::Device const & device, + vk::DescriptorSet const & descriptorSet, + std::vector> const & bufferData, + std::vector const & textureData, + uint32_t bindingOffset = 0 ); } // namespace su } // namespace vk diff --git a/tests/Hash/Hash.cpp b/tests/Hash/Hash.cpp index 31bcb88..999fc1a 100644 --- a/tests/Hash/Hash.cpp +++ b/tests/Hash/Hash.cpp @@ -16,7 +16,7 @@ // Compile test on using std::hash on handles #if defined( _MSC_VER ) -# pragma warning( disable : 4189 ) // local variable is initialized but not referenced +# pragma warning( disable : 4189 ) // local variable is initialized but not referenced #elif defined( __GNUC__ ) # pragma GCC diagnostic ignored "-Wunused-variable" #else @@ -40,7 +40,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::UniqueInstance instance = vk::createInstanceUnique( vk::InstanceCreateInfo( {}, &appInfo ) ); auto h1 = std::hash{}( *instance ); - auto h2 = std::hash{}( *instance ); + auto h2 = std::hash{}( static_cast( *instance ) ); assert( h1 == h2 ); std::unordered_set uset; diff --git a/vulkan/vulkan.hpp b/vulkan/vulkan.hpp index 268a45c..f77bce6 100644 --- a/vulkan/vulkan.hpp +++ b/vulkan/vulkan.hpp @@ -111908,7 +111908,8 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR = 0; public: - DispatchLoaderDynamic() VULKAN_HPP_NOEXCEPT = default; + DispatchLoaderDynamic() VULKAN_HPP_NOEXCEPT = default; + DispatchLoaderDynamic( DispatchLoaderDynamic const & rhs ) VULKAN_HPP_NOEXCEPT = default; #if !defined( VK_NO_PROTOTYPES ) // This interface is designed to be used for per-device function pointers in combination with a linked vulkan diff --git a/vulkan/vulkan_raii.hpp b/vulkan/vulkan_raii.hpp new file mode 100644 index 0000000..04d7df8 --- /dev/null +++ b/vulkan/vulkan_raii.hpp @@ -0,0 +1,10432 @@ +// 
Copyright 2015-2021 The Khronos Group Inc. +// +// SPDX-License-Identifier: Apache-2.0 OR MIT +// + +// This header is generated from the Khronos Vulkan XML API Registry. + +#ifndef VULKAN_RAII_HPP +#define VULKAN_RAII_HPP + +#include + +#if !defined( VULKAN_HPP_RAII_NAMESPACE ) +# define VULKAN_HPP_RAII_NAMESPACE raii +#endif + +#if !defined( VULKAN_HPP_RAII_DISPATCHER_TYPE ) +# define VULKAN_HPP_RAII_DISPATCHER_TYPE VULKAN_HPP_DEFAULT_DISPATCHER_TYPE +#endif + +namespace VULKAN_HPP_NAMESPACE +{ + namespace VULKAN_HPP_RAII_NAMESPACE + { +#if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) && !defined( VULKAN_HPP_NO_EXCEPTIONS ) + + template + VULKAN_HPP_CONSTEXPR_14 T exchange( T & obj, U && newValue ) + { +# if ( 14 <= VULKAN_HPP_CPP_VERSION ) + return std::exchange( obj, std::forward( newValue ) ); +# else + T oldValue = std::move( obj ); + obj = std::forward( newValue ); + return oldValue; +# endif + } + + class Context + { + public: + Context() + { + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = + m_dynamicLoader.getProcAddress( "vkGetInstanceProcAddr" ); + m_dispatcher.init( vkGetInstanceProcAddr ); + } + + ~Context() = default; + + Context( Context const & ) = delete; + Context( Context && rhs ) + : m_dynamicLoader( std::move( rhs.m_dynamicLoader ) ), m_dispatcher( std::move( rhs.m_dispatcher ) ) + {} + Context & operator=( Context const & ) = delete; + Context & operator =( Context && rhs ) + { + if ( this != &rhs ) + { + m_dynamicLoader = std::move( rhs.m_dynamicLoader ); + m_dispatcher = std::move( rhs.m_dispatcher ); + } + return *this; + } + + VULKAN_HPP_NODISCARD std::vector enumerateInstanceExtensionProperties( + Optional layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const; + + VULKAN_HPP_NODISCARD std::vector enumerateInstanceLayerProperties() const; + + VULKAN_HPP_NODISCARD uint32_t enumerateInstanceVersion() const; + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return &m_dispatcher; + } + + private: + 
vk::DynamicLoader m_dynamicLoader; + VULKAN_HPP_RAII_DISPATCHER_TYPE m_dispatcher; + }; + + class Instance + { + public: + using CType = VkInstance; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eInstance; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eInstance; + + public: + Instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Context const & context, + VULKAN_HPP_NAMESPACE::InstanceCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( *context.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateInstance( reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_instance ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateInstance" ); + } + m_dispatcher.init( m_instance ); + } + + Instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Context const & context, + VkInstance instance, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( *context.getDispatcher() ) + { + m_dispatcher.init( m_instance ); + } + + ~Instance() + { + if ( m_instance ) + { + getDispatcher()->vkDestroyInstance( static_cast( m_instance ), m_allocator ); + } + } + + Instance() = delete; + Instance( Instance const & ) = delete; + Instance( Instance && rhs ) + : m_instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_instance, {} ) ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Instance & operator=( Instance const & ) = delete; + Instance & operator =( Instance && rhs ) + { + if ( this != &rhs ) + { + 
getDispatcher()->vkDestroyInstance( static_cast( m_instance ), m_allocator ); + m_instance = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_instance, {} ); + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_, + uint64_t object, + size_t location, + int32_t messageCode, + const std::string & layerPrefix, + const std::string & message ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + enumeratePhysicalDeviceGroups() const; + + VULKAN_HPP_NODISCARD PFN_vkVoidFunction getProcAddr( const std::string & name ) const VULKAN_HPP_NOEXCEPT; + + void + submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, + const DebugUtilsMessengerCallbackDataEXT & callbackData ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::Instance const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_instance; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return &m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Instance m_instance; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE m_dispatcher; + }; + + class PhysicalDevice + { + public: + using CType = VkPhysicalDevice; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::ePhysicalDevice; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePhysicalDevice; + + public: + PhysicalDevice( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VkPhysicalDevice physicalDevice ) + : m_physicalDevice( physicalDevice ), m_dispatcher( instance.getDispatcher() ) 
+ {} + + PhysicalDevice( VkPhysicalDevice physicalDevice, VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : m_physicalDevice( physicalDevice ), m_dispatcher( dispatcher ) + {} + + PhysicalDevice() = delete; + PhysicalDevice( PhysicalDevice const & ) = delete; + PhysicalDevice( PhysicalDevice && rhs ) + : m_physicalDevice( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) + , m_dispatcher( rhs.m_dispatcher ) + {} + PhysicalDevice & operator=( PhysicalDevice const & ) = delete; + PhysicalDevice & operator =( PhysicalDevice && rhs ) + { + if ( this != &rhs ) + { + m_physicalDevice = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_physicalDevice, {} ); + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + +# ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + void acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display ) const; +# endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + VULKAN_HPP_NODISCARD std::vector enumerateDeviceExtensionProperties( + Optional layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const; + + VULKAN_HPP_NODISCARD std::vector enumerateDeviceLayerProperties() const; + + VULKAN_HPP_NODISCARD std::pair, std::vector> + enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR + getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo ) const; + + VULKAN_HPP_NODISCARD std::vector getCalibrateableTimeDomainsEXT() const; + + VULKAN_HPP_NODISCARD std::vector + getCooperativeMatrixPropertiesNV() const; + +# ifdef VK_USE_PLATFORM_DIRECTFB_EXT + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 + getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb ) const VULKAN_HPP_NOEXCEPT; +# endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + VULKAN_HPP_NODISCARD std::vector + getDisplayPlaneProperties2KHR() const; + + VULKAN_HPP_NODISCARD 
std::vector + getDisplayPlanePropertiesKHR() const; + + VULKAN_HPP_NODISCARD std::vector getDisplayProperties2KHR() const; + + VULKAN_HPP_NODISCARD std::vector getDisplayPropertiesKHR() const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferProperties( + const PhysicalDeviceExternalBufferInfo & externalBufferInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalFenceProperties getExternalFenceProperties( + const PhysicalDeviceExternalFenceInfo & externalFenceInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV getExternalImageFormatPropertiesNV( + VULKAN_HPP_NAMESPACE::Format format, + VULKAN_HPP_NAMESPACE::ImageType type, + VULKAN_HPP_NAMESPACE::ImageTiling tiling, + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType + VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphoreProperties( + const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures getFeatures() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2() const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain getFeatures2() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties + getFormatProperties( VULKAN_HPP_NAMESPACE::Format format ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties2 + getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain + getFormatProperties2( 
VULKAN_HPP_NAMESPACE::Format format ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getFragmentShadingRatesKHR() const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageFormatProperties getImageFormatProperties( + VULKAN_HPP_NAMESPACE::Format format, + VULKAN_HPP_NAMESPACE::ImageType type, + VULKAN_HPP_NAMESPACE::ImageTiling tiling, + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageFormatProperties2 + getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo ) const; + + template + VULKAN_HPP_NODISCARD StructureChain + getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties + getMemoryProperties() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 + getMemoryProperties2() const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain getMemoryProperties2() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT + getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties getProperties() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2() const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain getProperties2() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD uint32_t getQueueFamilyPerformanceQueryPassesKHR( + const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD 
std::vector + getQueueFamilyProperties() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getQueueFamilyProperties2() const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2() const; + + VULKAN_HPP_NODISCARD std::vector + getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, + VULKAN_HPP_NAMESPACE::ImageType type, + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, + VULKAN_HPP_NAMESPACE::ImageTiling tiling ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo ) const + VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getSupportedFramebufferMixedSamplesCombinationsNV() const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT + getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR + getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const; + + template + VULKAN_HPP_NODISCARD StructureChain + getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR + getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + + VULKAN_HPP_NODISCARD std::vector + getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const; + + VULKAN_HPP_NODISCARD std::vector + getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD std::vector + getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD std::vector + getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + + VULKAN_HPP_NODISCARD 
VULKAN_HPP_NAMESPACE::Bool32 + getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + + VULKAN_HPP_NODISCARD std::vector + getToolPropertiesEXT() const; + +# ifdef VK_USE_PLATFORM_WAYLAND_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 + getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, + struct wl_display & display ) const VULKAN_HPP_NOEXCEPT; +# endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 + getWin32PresentationSupportKHR( uint32_t queueFamilyIndex ) const VULKAN_HPP_NOEXCEPT; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +# ifdef VK_USE_PLATFORM_XCB_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 getXcbPresentationSupportKHR( + uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id ) const VULKAN_HPP_NOEXCEPT; +# endif /*VK_USE_PLATFORM_XCB_KHR*/ + +# ifdef VK_USE_PLATFORM_XLIB_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 getXlibPresentationSupportKHR( + uint32_t queueFamilyIndex, Display & dpy, VisualID visualID ) const VULKAN_HPP_NOEXCEPT; +# endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + VULKAN_HPP_NAMESPACE::PhysicalDevice const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::PhysicalDevice m_physicalDevice; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class PhysicalDevices : public std::vector + { + public: + PhysicalDevices( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = instance.getDispatcher(); + std::vector physicalDevices; + uint32_t physicalDeviceCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( dispatcher->vkEnumeratePhysicalDevices( + static_cast( *instance ), 
&physicalDeviceCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && physicalDeviceCount ) + { + physicalDevices.resize( physicalDeviceCount ); + result = static_cast( dispatcher->vkEnumeratePhysicalDevices( + static_cast( *instance ), &physicalDeviceCount, physicalDevices.data() ) ); + VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + this->reserve( physicalDeviceCount ); + for ( auto const & physicalDevice : physicalDevices ) + { + this->emplace_back( physicalDevice, dispatcher ); + } + } + else + { + throwResultException( result, "vkEnumeratePhysicalDevices" ); + } + } + + PhysicalDevices() = delete; + PhysicalDevices( PhysicalDevices const & ) = delete; + PhysicalDevices( PhysicalDevices && rhs ) = default; + PhysicalDevices & operator=( PhysicalDevices const & ) = delete; + PhysicalDevices & operator=( PhysicalDevices && rhs ) = default; + }; + + class Device + { + public: + using CType = VkDevice; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDevice; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDevice; + + public: + Device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + VULKAN_HPP_NAMESPACE::DeviceCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( *physicalDevice.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateDevice( static_cast( *physicalDevice ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_device ) ) ); + if ( result != 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDevice" ); + } + m_dispatcher.init( m_device ); + } + + Device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + VkDevice device, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( *physicalDevice.getDispatcher() ) + { + m_dispatcher.init( m_device ); + } + + ~Device() + { + if ( m_device ) + { + getDispatcher()->vkDestroyDevice( static_cast( m_device ), m_allocator ); + } + } + + Device() = delete; + Device( Device const & ) = delete; + Device( Device && rhs ) + : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Device & operator=( Device const & ) = delete; + Device & operator =( Device && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDevice( static_cast( m_device ), m_allocator ); + m_device = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ); + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD std::pair + acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo ) const; + + void acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info ) const; + + void bindAccelerationStructureMemoryNV( + ArrayProxy const & bindInfos ) const; + + void bindBufferMemory2( ArrayProxy const & bindInfos ) const; + + void bindImageMemory2( ArrayProxy const & bindInfos ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result buildAccelerationStructuresKHR( + VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + ArrayProxy const & infos, + ArrayProxy const & + pBuildRangeInfos ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result + copyAccelerationStructureKHR( 
VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + const CopyAccelerationStructureInfoKHR & info ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result + copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + const CopyAccelerationStructureToMemoryInfoKHR & info ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result + copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + const CopyMemoryToAccelerationStructureInfoKHR & info ) const; + + void debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo ) const; + + void debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo ) const; + + void waitIdle() const; + + void displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, + const DisplayPowerInfoEXT & displayPowerInfo ) const; + + void + flushMappedMemoryRanges( ArrayProxy const & memoryRanges ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR + getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, + const AccelerationStructureBuildGeometryInfoKHR & buildInfo, + ArrayProxy const & maxPrimitiveCounts + VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const + VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceAddress getAccelerationStructureAddressKHR( + const AccelerationStructureDeviceAddressInfoKHR & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR getAccelerationStructureMemoryRequirementsNV( + const AccelerationStructureMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain getAccelerationStructureMemoryRequirementsNV( + const AccelerationStructureMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT; + +# ifdef VK_USE_PLATFORM_ANDROID_KHR + VULKAN_HPP_NODISCARD 
VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID + getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer ) const; +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +# ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD StructureChain + getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer ) const; +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceAddress + getBufferAddress( const BufferDeviceAddressInfo & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 + getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain + getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD uint64_t + getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::pair, uint64_t> getCalibratedTimestampsEXT( + ArrayProxy const & timestampInfos ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport + getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain + getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR + getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionInfoKHR & versionInfo ) const + VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeatures( + uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR + 
getGroupPresentCapabilitiesKHR() const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR + getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR + getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const; + + VULKAN_HPP_NODISCARD uint64_t + getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD PFN_vkVoidFunction getProcAddr( const std::string & name ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD int getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo ) const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getGeneratedCommandsMemoryRequirementsNV( + const GeneratedCommandsMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain getGeneratedCommandsMemoryRequirementsNV( + const GeneratedCommandsMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 + getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD StructureChain + getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD uint32_t + getImageViewHandleNVX( const ImageViewHandleInfoNVX & info ) const VULKAN_HPP_NOEXCEPT; + +# ifdef VK_USE_PLATFORM_ANDROID_KHR + 
VULKAN_HPP_NODISCARD struct AHardwareBuffer * + getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info ) const; +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VULKAN_HPP_NODISCARD int getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR + getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT + getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, + const void * pHostPointer ) const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR + getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PerformanceValueINTEL + getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter ) const; + + VULKAN_HPP_NODISCARD std::vector + getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo ) const; + + VULKAN_HPP_NODISCARD std::vector + getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo ) const; + + VULKAN_HPP_NODISCARD std::vector + getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo ) const; + + VULKAN_HPP_NODISCARD uint64_t + getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType_, + uint64_t objectHandle, + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD int getSemaphoreFdKHR( const 
SemaphoreGetFdInfoKHR & getFdInfo ) const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + void importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo ) const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + void importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + void importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo ) const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + void + importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + void initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo ) const; + + void invalidateMappedMemoryRanges( + ArrayProxy const & memoryRanges ) const; + + void releaseProfilingLockKHR() const VULKAN_HPP_NOEXCEPT; + + void resetFences( ArrayProxy const & fences ) const; + + void setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo ) const; + + void setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo ) const; + + void setHdrMetadataEXT( ArrayProxy const & swapchains, + ArrayProxy const & metadata ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType_, + uint64_t objectHandle, + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, + uint64_t data ) const; + + void signalSemaphore( const SemaphoreSignalInfo & signalInfo ) const; + + void uninitializePerformanceApiINTEL() const VULKAN_HPP_NOEXCEPT; + + void updateDescriptorSets( + ArrayProxy const & descriptorWrites, + ArrayProxy const & descriptorCopies ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result + waitForFences( ArrayProxy const & fences, + 
VULKAN_HPP_NAMESPACE::Bool32 waitAll, + uint64_t timeout ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result waitSemaphores( const SemaphoreWaitInfo & waitInfo, + uint64_t timeout ) const; + + template + VULKAN_HPP_NODISCARD std::vector writeAccelerationStructuresPropertiesKHR( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + size_t dataSize, + size_t stride ) const; + + template + VULKAN_HPP_NODISCARD T writeAccelerationStructuresPropertyKHR( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + size_t stride ) const; + + VULKAN_HPP_NAMESPACE::Device const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_device; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return &m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Device m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE m_dispatcher; + }; + + class AccelerationStructureKHR + { + public: + using CType = VkAccelerationStructureKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eAccelerationStructureKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureKHR; + + public: + AccelerationStructureKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateAccelerationStructureKHR( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( 
&m_accelerationStructureKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateAccelerationStructureKHR" ); + } + } + + AccelerationStructureKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkAccelerationStructureKHR accelerationStructureKHR, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_accelerationStructureKHR( accelerationStructureKHR ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~AccelerationStructureKHR() + { + if ( m_accelerationStructureKHR ) + { + getDispatcher()->vkDestroyAccelerationStructureKHR( + m_device, static_cast( m_accelerationStructureKHR ), m_allocator ); + } + } + + AccelerationStructureKHR() = delete; + AccelerationStructureKHR( AccelerationStructureKHR const & ) = delete; + AccelerationStructureKHR( AccelerationStructureKHR && rhs ) + : m_accelerationStructureKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_accelerationStructureKHR, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + AccelerationStructureKHR & operator=( AccelerationStructureKHR const & ) = delete; + AccelerationStructureKHR & operator =( AccelerationStructureKHR && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyAccelerationStructureKHR( + m_device, static_cast( m_accelerationStructureKHR ), m_allocator ); + m_accelerationStructureKHR = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_accelerationStructureKHR, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return 
m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR m_accelerationStructureKHR; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class AccelerationStructureNV + { + public: + using CType = VkAccelerationStructureNV; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eAccelerationStructureNV; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureNV; + + public: + AccelerationStructureNV( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateAccelerationStructureNV( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_accelerationStructureNV ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateAccelerationStructureNV" ); + } + } + + AccelerationStructureNV( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkAccelerationStructureNV accelerationStructureNV, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_accelerationStructureNV( accelerationStructureNV ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~AccelerationStructureNV() + { + if ( m_accelerationStructureNV ) + { + getDispatcher()->vkDestroyAccelerationStructureNV( + m_device, static_cast( 
m_accelerationStructureNV ), m_allocator ); + } + } + + AccelerationStructureNV() = delete; + AccelerationStructureNV( AccelerationStructureNV const & ) = delete; + AccelerationStructureNV( AccelerationStructureNV && rhs ) + : m_accelerationStructureNV( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_accelerationStructureNV, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + AccelerationStructureNV & operator=( AccelerationStructureNV const & ) = delete; + AccelerationStructureNV & operator =( AccelerationStructureNV && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyAccelerationStructureNV( + m_device, static_cast( m_accelerationStructureNV ), m_allocator ); + m_accelerationStructureNV = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_accelerationStructureNV, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + template + VULKAN_HPP_NODISCARD std::vector getHandle( size_t dataSize ) const; + + template + VULKAN_HPP_NODISCARD T getHandle() const; + + VULKAN_HPP_NAMESPACE::AccelerationStructureNV const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::AccelerationStructureNV m_accelerationStructureNV; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Buffer + { + public: + using CType = VkBuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eBuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer; + + public: + Buffer( 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::BufferCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateBuffer( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_buffer ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateBuffer" ); + } + } + + Buffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkBuffer buffer, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_buffer( buffer ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~Buffer() + { + if ( m_buffer ) + { + getDispatcher()->vkDestroyBuffer( m_device, static_cast( m_buffer ), m_allocator ); + } + } + + Buffer() = delete; + Buffer( Buffer const & ) = delete; + Buffer( Buffer && rhs ) + : m_buffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_buffer, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Buffer & operator=( Buffer const & ) = delete; + Buffer & operator =( Buffer && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyBuffer( m_device, static_cast( m_buffer ), m_allocator ); + m_buffer = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_buffer, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void bindMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements 
getMemoryRequirements() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::Buffer const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_buffer; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Buffer m_buffer; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class BufferView + { + public: + using CType = VkBufferView; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eBufferView; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBufferView; + + public: + BufferView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::BufferViewCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateBufferView( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_bufferView ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateBufferView" ); + } + } + + BufferView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkBufferView bufferView, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_bufferView( bufferView ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~BufferView() + { + if ( m_bufferView ) + { + getDispatcher()->vkDestroyBufferView( m_device, static_cast( m_bufferView ), m_allocator ); + } + } + + 
BufferView() = delete; + BufferView( BufferView const & ) = delete; + BufferView( BufferView && rhs ) + : m_bufferView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_bufferView, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + BufferView & operator=( BufferView const & ) = delete; + BufferView & operator =( BufferView && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyBufferView( m_device, static_cast( m_bufferView ), m_allocator ); + m_bufferView = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_bufferView, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::BufferView const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_bufferView; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::BufferView m_bufferView; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class CommandPool + { + public: + using CType = VkCommandPool; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eCommandPool; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandPool; + + public: + CommandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateCommandPool( static_cast( 
*device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_commandPool ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateCommandPool" ); + } + } + + CommandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkCommandPool commandPool, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_commandPool( commandPool ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~CommandPool() + { + if ( m_commandPool ) + { + getDispatcher()->vkDestroyCommandPool( m_device, static_cast( m_commandPool ), m_allocator ); + } + } + + CommandPool() = delete; + CommandPool( CommandPool const & ) = delete; + CommandPool( CommandPool && rhs ) + : m_commandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandPool, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + CommandPool & operator=( CommandPool const & ) = delete; + CommandPool & operator =( CommandPool && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyCommandPool( m_device, static_cast( m_commandPool ), m_allocator ); + m_commandPool = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandPool, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void reset( VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + void trim( VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const + VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::CommandPool const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_commandPool; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + 
VULKAN_HPP_NAMESPACE::CommandPool m_commandPool; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class CommandBuffer + { + public: + using CType = VkCommandBuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eCommandBuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandBuffer; + + public: + CommandBuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkCommandBuffer commandBuffer, + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::CommandPool const & commandPool ) + : m_commandBuffer( commandBuffer ) + , m_device( *device ) + , m_commandPool( *commandPool ) + , m_dispatcher( device.getDispatcher() ) + {} + + CommandBuffer( VkCommandBuffer commandBuffer, + VkDevice device, + VkCommandPool commandPool, + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : m_commandBuffer( commandBuffer ), m_device( device ), m_commandPool( commandPool ), m_dispatcher( dispatcher ) + {} + + ~CommandBuffer() + { + if ( m_commandBuffer ) + { + getDispatcher()->vkFreeCommandBuffers( + m_device, m_commandPool, 1, reinterpret_cast( &m_commandBuffer ) ); + } + } + + CommandBuffer() = delete; + CommandBuffer( CommandBuffer const & ) = delete; + CommandBuffer( CommandBuffer && rhs ) + : m_commandBuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandBuffer, {} ) ) + , m_device( rhs.m_device ) + , m_commandPool( rhs.m_commandPool ) + , m_dispatcher( rhs.m_dispatcher ) + {} + CommandBuffer & operator=( CommandBuffer const & ) = delete; + CommandBuffer & operator =( CommandBuffer && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkFreeCommandBuffers( + m_device, m_commandPool, 1, reinterpret_cast( &m_commandBuffer ) ); + m_commandBuffer = 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandBuffer, {} ); + m_device = rhs.m_device; + m_commandPool = rhs.m_commandPool; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void begin( const CommandBufferBeginInfo & beginInfo ) const; + + void beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin ) const + VULKAN_HPP_NOEXCEPT; + + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT; + + void beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query, + VULKAN_HPP_NAMESPACE::QueryControlFlags flags + VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + void beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query, + VULKAN_HPP_NAMESPACE::QueryControlFlags flags, + uint32_t index ) const VULKAN_HPP_NOEXCEPT; + + void beginRenderPass( const RenderPassBeginInfo & renderPassBegin, + VULKAN_HPP_NAMESPACE::SubpassContents contents ) const VULKAN_HPP_NOEXCEPT; + + void beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, + const SubpassBeginInfo & subpassBeginInfo ) const VULKAN_HPP_NOEXCEPT; + + void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, + ArrayProxy const & counterBuffers, + ArrayProxy const & counterBufferOffsets + VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t firstSet, + ArrayProxy const & descriptorSets, + ArrayProxy const & dynamicOffsets ) const VULKAN_HPP_NOEXCEPT; + + void bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::IndexType indexType ) const VULKAN_HPP_NOEXCEPT; + + void bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::Pipeline pipeline ) 
const VULKAN_HPP_NOEXCEPT; + + void bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::Pipeline pipeline, + uint32_t groupIndex ) const VULKAN_HPP_NOEXCEPT; + + void bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout ) const VULKAN_HPP_NOEXCEPT; + + void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, + ArrayProxy const & buffers, + ArrayProxy const & offsets, + ArrayProxy const & sizes + VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void bindVertexBuffers( uint32_t firstBinding, + ArrayProxy const & buffers, + ArrayProxy const & offsets ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void bindVertexBuffers2EXT( + uint32_t firstBinding, + ArrayProxy const & buffers, + ArrayProxy const & offsets, + ArrayProxy const & sizes VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + ArrayProxy const & strides + VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions, + VULKAN_HPP_NAMESPACE::Filter filter ) const VULKAN_HPP_NOEXCEPT; + + void blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo ) const VULKAN_HPP_NOEXCEPT; + + void buildAccelerationStructureNV( const AccelerationStructureInfoNV & info, + VULKAN_HPP_NAMESPACE::Buffer instanceData, + VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, + VULKAN_HPP_NAMESPACE::Bool32 update, + VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, + VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, + VULKAN_HPP_NAMESPACE::Buffer scratch, + VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset ) const VULKAN_HPP_NOEXCEPT; + + void buildAccelerationStructuresIndirectKHR( + ArrayProxy const & 
infos, + ArrayProxy const & indirectDeviceAddresses, + ArrayProxy const & indirectStrides, + ArrayProxy const & pMaxPrimitiveCounts ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void buildAccelerationStructuresKHR( + ArrayProxy const & infos, + ArrayProxy const & + pBuildRangeInfos ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void + clearAttachments( ArrayProxy const & attachments, + ArrayProxy const & rects ) const VULKAN_HPP_NOEXCEPT; + + void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, + const ClearColorValue & color, + ArrayProxy const & ranges ) const + VULKAN_HPP_NOEXCEPT; + + void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, + const ClearDepthStencilValue & depthStencil, + ArrayProxy const & ranges ) const + VULKAN_HPP_NOEXCEPT; + + void copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info ) const VULKAN_HPP_NOEXCEPT; + + void copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, + VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, + VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode ) const + VULKAN_HPP_NOEXCEPT; + + void copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info ) const + VULKAN_HPP_NOEXCEPT; + + void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT; + + void copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo ) const VULKAN_HPP_NOEXCEPT; + + void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions ) const + VULKAN_HPP_NOEXCEPT; + + void copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo ) const VULKAN_HPP_NOEXCEPT; + + void copyImage( 
VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT; + + void copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo ) const VULKAN_HPP_NOEXCEPT; + + void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + ArrayProxy const & regions ) const + VULKAN_HPP_NOEXCEPT; + + void copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo ) const VULKAN_HPP_NOEXCEPT; + + void copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info ) const + VULKAN_HPP_NOEXCEPT; + + void copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + VULKAN_HPP_NAMESPACE::DeviceSize stride, + VULKAN_HPP_NAMESPACE::QueryResultFlags flags + VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + void debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo ) const VULKAN_HPP_NOEXCEPT; + + void debugMarkerEndEXT() const VULKAN_HPP_NOEXCEPT; + + void debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo ) const VULKAN_HPP_NOEXCEPT; + + void dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT; + + void dispatchBase( uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT; + + void dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset ) const VULKAN_HPP_NOEXCEPT; + + void draw( uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance ) const 
VULKAN_HPP_NOEXCEPT; + + void drawIndexed( uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance ) const VULKAN_HPP_NOEXCEPT; + + void drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + uint32_t drawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT; + + void drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::Buffer countBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT; + + void drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + uint32_t drawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT; + + void drawIndirectByteCountEXT( uint32_t instanceCount, + uint32_t firstInstance, + VULKAN_HPP_NAMESPACE::Buffer counterBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride ) const VULKAN_HPP_NOEXCEPT; + + void drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::Buffer countBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT; + + void drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::Buffer countBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT; + + void drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + uint32_t drawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT; + + void drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask ) const VULKAN_HPP_NOEXCEPT; + + void endConditionalRenderingEXT() const 
VULKAN_HPP_NOEXCEPT; + + void endDebugUtilsLabelEXT() const VULKAN_HPP_NOEXCEPT; + + void endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query ) const VULKAN_HPP_NOEXCEPT; + + void endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query, + uint32_t index ) const VULKAN_HPP_NOEXCEPT; + + void endRenderPass() const VULKAN_HPP_NOEXCEPT; + + void endRenderPass2( const SubpassEndInfo & subpassEndInfo ) const VULKAN_HPP_NOEXCEPT; + + void endTransformFeedbackEXT( uint32_t firstCounterBuffer, + ArrayProxy const & counterBuffers, + ArrayProxy const & counterBufferOffsets + VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void executeCommands( ArrayProxy const & commandBuffers ) const + VULKAN_HPP_NOEXCEPT; + + void + executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, + const GeneratedCommandsInfoNV & generatedCommandsInfo ) const VULKAN_HPP_NOEXCEPT; + + void fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + uint32_t data ) const VULKAN_HPP_NOEXCEPT; + + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT; + + void nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents ) const VULKAN_HPP_NOEXCEPT; + + void nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, + const SubpassEndInfo & subpassEndInfo ) const VULKAN_HPP_NOEXCEPT; + + void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, + VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, + VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, + ArrayProxy const & memoryBarriers, + ArrayProxy const & bufferMemoryBarriers, + ArrayProxy const & imageMemoryBarriers ) + const VULKAN_HPP_NOEXCEPT; + + void pipelineBarrier2KHR( const DependencyInfoKHR & dependencyInfo ) const VULKAN_HPP_NOEXCEPT; + + void preprocessGeneratedCommandsNV( const 
GeneratedCommandsInfoNV & generatedCommandsInfo ) const + VULKAN_HPP_NOEXCEPT; + + template + void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, + VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, + uint32_t offset, + ArrayProxy const & values ) const VULKAN_HPP_NOEXCEPT; + + void pushDescriptorSetKHR( + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + ArrayProxy const & descriptorWrites ) const VULKAN_HPP_NOEXCEPT; + + void pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + const void * pData ) const VULKAN_HPP_NOEXCEPT; + + void resetEvent( VULKAN_HPP_NAMESPACE::Event event, + VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask ) const VULKAN_HPP_NOEXCEPT; + + void resetEvent2KHR( VULKAN_HPP_NAMESPACE::Event event, + VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR stageMask ) const VULKAN_HPP_NOEXCEPT; + + void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT; + + void + resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT; + + void resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo ) const VULKAN_HPP_NOEXCEPT; + + void setBlendConstants( const float blendConstants[4] ) const VULKAN_HPP_NOEXCEPT; + + void setCheckpointNV( const void * pCheckpointMarker ) const VULKAN_HPP_NOEXCEPT; + + void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, + ArrayProxy const & + customSampleOrders ) const VULKAN_HPP_NOEXCEPT; + + void setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const + 
VULKAN_HPP_NOEXCEPT; + + void setDepthBias( float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor ) const VULKAN_HPP_NOEXCEPT; + + void setDepthBounds( float minDepthBounds, float maxDepthBounds ) const VULKAN_HPP_NOEXCEPT; + + void setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable ) const VULKAN_HPP_NOEXCEPT; + + void setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp ) const VULKAN_HPP_NOEXCEPT; + + void setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable ) const VULKAN_HPP_NOEXCEPT; + + void setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable ) const VULKAN_HPP_NOEXCEPT; + + void setDeviceMask( uint32_t deviceMask ) const VULKAN_HPP_NOEXCEPT; + + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, + ArrayProxy const & discardRectangles ) const + VULKAN_HPP_NOEXCEPT; + + void setEvent( VULKAN_HPP_NAMESPACE::Event event, + VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask ) const VULKAN_HPP_NOEXCEPT; + + void setEvent2KHR( VULKAN_HPP_NAMESPACE::Event event, + const DependencyInfoKHR & dependencyInfo ) const VULKAN_HPP_NOEXCEPT; + + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, + ArrayProxy const & exclusiveScissors ) const + VULKAN_HPP_NOEXCEPT; + + void setFragmentShadingRateEnumNV( + VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate, + const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT; + + void setFragmentShadingRateKHR( + const Extent2D & fragmentSize, + const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT; + + void setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace ) const VULKAN_HPP_NOEXCEPT; + + void setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT; + + void setLineWidth( float lineWidth ) const VULKAN_HPP_NOEXCEPT; + + void setPerformanceMarkerINTEL( 
const PerformanceMarkerInfoINTEL & markerInfo ) const; + + void setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo ) const; + + void setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo ) const; + + void + setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology ) const VULKAN_HPP_NOEXCEPT; + + void setRayTracingPipelineStackSizeKHR( uint32_t pipelineStackSize ) const VULKAN_HPP_NOEXCEPT; + + void setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo ) const VULKAN_HPP_NOEXCEPT; + + void setScissor( uint32_t firstScissor, + ArrayProxy const & scissors ) const VULKAN_HPP_NOEXCEPT; + + void setScissorWithCountEXT( ArrayProxy const & scissors ) const + VULKAN_HPP_NOEXCEPT; + + void setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + uint32_t compareMask ) const VULKAN_HPP_NOEXCEPT; + + void setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + VULKAN_HPP_NAMESPACE::StencilOp failOp, + VULKAN_HPP_NAMESPACE::StencilOp passOp, + VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, + VULKAN_HPP_NAMESPACE::CompareOp compareOp ) const VULKAN_HPP_NOEXCEPT; + + void setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + uint32_t reference ) const VULKAN_HPP_NOEXCEPT; + + void setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable ) const VULKAN_HPP_NOEXCEPT; + + void setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + uint32_t writeMask ) const VULKAN_HPP_NOEXCEPT; + + void setViewport( uint32_t firstViewport, + ArrayProxy const & viewports ) const VULKAN_HPP_NOEXCEPT; + + void setViewportShadingRatePaletteNV( uint32_t firstViewport, + ArrayProxy const & + shadingRatePalettes ) const VULKAN_HPP_NOEXCEPT; + + void setViewportWScalingNV( uint32_t firstViewport, + ArrayProxy const & viewportWScalings ) + const VULKAN_HPP_NOEXCEPT; + + void setViewportWithCountEXT( ArrayProxy const & viewports ) 
const + VULKAN_HPP_NOEXCEPT; + + void traceRaysIndirectKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, + const StridedDeviceAddressRegionKHR & missShaderBindingTable, + const StridedDeviceAddressRegionKHR & hitShaderBindingTable, + const StridedDeviceAddressRegionKHR & callableShaderBindingTable, + VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress ) const VULKAN_HPP_NOEXCEPT; + + void traceRaysKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, + const StridedDeviceAddressRegionKHR & missShaderBindingTable, + const StridedDeviceAddressRegionKHR & hitShaderBindingTable, + const StridedDeviceAddressRegionKHR & callableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth ) const VULKAN_HPP_NOEXCEPT; + + void traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, + VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, + VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, + VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, + VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, + VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, + VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth ) const VULKAN_HPP_NOEXCEPT; + + template + void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + ArrayProxy const & data ) const VULKAN_HPP_NOEXCEPT; + + void waitEvents( ArrayProxy const & events, + VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, + VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, + ArrayProxy const & memoryBarriers, + ArrayProxy const & bufferMemoryBarriers, + ArrayProxy const & 
imageMemoryBarriers ) const + VULKAN_HPP_NOEXCEPT; + + void waitEvents2KHR( ArrayProxy const & events, + ArrayProxy const & dependencyInfos ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; + + void writeAccelerationStructuresPropertiesKHR( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT; + + void writeAccelerationStructuresPropertiesNV( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT; + + void writeBufferMarker2AMD( VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR stage, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + uint32_t marker ) const VULKAN_HPP_NOEXCEPT; + + void writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + uint32_t marker ) const VULKAN_HPP_NOEXCEPT; + + void writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query ) const VULKAN_HPP_NOEXCEPT; + + void writeTimestamp2KHR( VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR stage, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query ) const VULKAN_HPP_NOEXCEPT; + + void end() const; + + void reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + VULKAN_HPP_NAMESPACE::CommandBuffer const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_commandBuffer; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::CommandBuffer m_commandBuffer; + VkDevice m_device; + VkCommandPool m_commandPool; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class 
CommandBuffers : public std::vector + { + public: + CommandBuffers( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo const & allocateInfo ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector commandBuffers( allocateInfo.commandBufferCount ); + VULKAN_HPP_NAMESPACE::Result result = static_cast( + dispatcher->vkAllocateCommandBuffers( static_cast( *device ), + reinterpret_cast( &allocateInfo ), + commandBuffers.data() ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + this->reserve( allocateInfo.commandBufferCount ); + for ( auto const & commandBuffer : commandBuffers ) + { + this->emplace_back( commandBuffer, + static_cast( *device ), + static_cast( allocateInfo.commandPool ), + dispatcher ); + } + } + else + { + throwResultException( result, "vkAllocateCommandBuffers" ); + } + } + + CommandBuffers() = delete; + CommandBuffers( CommandBuffers const & ) = delete; + CommandBuffers( CommandBuffers && rhs ) = default; + CommandBuffers & operator=( CommandBuffers const & ) = delete; + CommandBuffers & operator=( CommandBuffers && rhs ) = default; + }; + + class DebugReportCallbackEXT + { + public: + using CType = VkDebugReportCallbackEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDebugReportCallbackEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDebugReportCallbackEXT; + + public: + DebugReportCallbackEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( 
instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDebugReportCallbackEXT( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_debugReportCallbackEXT ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDebugReportCallbackEXT" ); + } + } + + DebugReportCallbackEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VkDebugReportCallbackEXT debugReportCallbackEXT, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_debugReportCallbackEXT( debugReportCallbackEXT ) + , m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + {} + + ~DebugReportCallbackEXT() + { + if ( m_debugReportCallbackEXT ) + { + getDispatcher()->vkDestroyDebugReportCallbackEXT( + m_instance, static_cast( m_debugReportCallbackEXT ), m_allocator ); + } + } + + DebugReportCallbackEXT() = delete; + DebugReportCallbackEXT( DebugReportCallbackEXT const & ) = delete; + DebugReportCallbackEXT( DebugReportCallbackEXT && rhs ) + : m_debugReportCallbackEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_debugReportCallbackEXT, {} ) ) + , m_instance( rhs.m_instance ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DebugReportCallbackEXT & operator=( DebugReportCallbackEXT const & ) = delete; + DebugReportCallbackEXT & operator =( DebugReportCallbackEXT && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDebugReportCallbackEXT( + m_instance, static_cast( m_debugReportCallbackEXT ), m_allocator ); + m_debugReportCallbackEXT = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_debugReportCallbackEXT, {} ); + m_instance = rhs.m_instance; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + 
VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT m_debugReportCallbackEXT; + VkInstance m_instance; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DebugUtilsMessengerEXT + { + public: + using CType = VkDebugUtilsMessengerEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDebugUtilsMessengerEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + DebugUtilsMessengerEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDebugUtilsMessengerEXT( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_debugUtilsMessengerEXT ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDebugUtilsMessengerEXT" ); + } + } + + DebugUtilsMessengerEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VkDebugUtilsMessengerEXT debugUtilsMessengerEXT, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_debugUtilsMessengerEXT( debugUtilsMessengerEXT ) + , m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( 
instance.getDispatcher() ) + {} + + ~DebugUtilsMessengerEXT() + { + if ( m_debugUtilsMessengerEXT ) + { + getDispatcher()->vkDestroyDebugUtilsMessengerEXT( + m_instance, static_cast( m_debugUtilsMessengerEXT ), m_allocator ); + } + } + + DebugUtilsMessengerEXT() = delete; + DebugUtilsMessengerEXT( DebugUtilsMessengerEXT const & ) = delete; + DebugUtilsMessengerEXT( DebugUtilsMessengerEXT && rhs ) + : m_debugUtilsMessengerEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_debugUtilsMessengerEXT, {} ) ) + , m_instance( rhs.m_instance ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DebugUtilsMessengerEXT & operator=( DebugUtilsMessengerEXT const & ) = delete; + DebugUtilsMessengerEXT & operator =( DebugUtilsMessengerEXT && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDebugUtilsMessengerEXT( + m_instance, static_cast( m_debugUtilsMessengerEXT ), m_allocator ); + m_debugUtilsMessengerEXT = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_debugUtilsMessengerEXT, {} ); + m_instance = rhs.m_instance; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT m_debugUtilsMessengerEXT; + VkInstance m_instance; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DeferredOperationKHR + { + public: + using CType = VkDeferredOperationKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDeferredOperationKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + 
VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + DeferredOperationKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDeferredOperationKHR( + static_cast( *device ), + m_allocator, + reinterpret_cast( &m_deferredOperationKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDeferredOperationKHR" ); + } + } + + DeferredOperationKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkDeferredOperationKHR deferredOperationKHR, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_deferredOperationKHR( deferredOperationKHR ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~DeferredOperationKHR() + { + if ( m_deferredOperationKHR ) + { + getDispatcher()->vkDestroyDeferredOperationKHR( + m_device, static_cast( m_deferredOperationKHR ), m_allocator ); + } + } + + DeferredOperationKHR() = delete; + DeferredOperationKHR( DeferredOperationKHR const & ) = delete; + DeferredOperationKHR( DeferredOperationKHR && rhs ) + : m_deferredOperationKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_deferredOperationKHR, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DeferredOperationKHR & operator=( DeferredOperationKHR const & ) = delete; + DeferredOperationKHR & operator =( DeferredOperationKHR && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDeferredOperationKHR( + m_device, static_cast( m_deferredOperationKHR ), m_allocator ); + m_deferredOperationKHR = + 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_deferredOperationKHR, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result join() const; + + VULKAN_HPP_NODISCARD uint32_t getMaxConcurrency() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result getResult() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::DeferredOperationKHR const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DeferredOperationKHR m_deferredOperationKHR; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DescriptorPool + { + public: + using CType = VkDescriptorPool; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorPool; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorPool; + + public: + DescriptorPool( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateDescriptorPool( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_descriptorPool ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDescriptorPool" ); + } + } + + 
DescriptorPool( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkDescriptorPool descriptorPool, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_descriptorPool( descriptorPool ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~DescriptorPool() + { + if ( m_descriptorPool ) + { + getDispatcher()->vkDestroyDescriptorPool( + m_device, static_cast( m_descriptorPool ), m_allocator ); + } + } + + DescriptorPool() = delete; + DescriptorPool( DescriptorPool const & ) = delete; + DescriptorPool( DescriptorPool && rhs ) + : m_descriptorPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorPool, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DescriptorPool & operator=( DescriptorPool const & ) = delete; + DescriptorPool & operator =( DescriptorPool && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDescriptorPool( + m_device, static_cast( m_descriptorPool ), m_allocator ); + m_descriptorPool = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorPool, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void reset( VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const + VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::DescriptorPool const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DescriptorPool m_descriptorPool; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DescriptorSet + { + public: + using CType = VkDescriptorSet; + + static 
VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorSet; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorSet; + + public: + DescriptorSet( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkDescriptorSet descriptorSet, + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DescriptorPool const & descriptorPool ) + : m_descriptorSet( descriptorSet ) + , m_device( *device ) + , m_descriptorPool( *descriptorPool ) + , m_dispatcher( device.getDispatcher() ) + {} + + DescriptorSet( VkDescriptorSet descriptorSet, + VkDevice device, + VkDescriptorPool descriptorPool, + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : m_descriptorSet( descriptorSet ) + , m_device( device ) + , m_descriptorPool( descriptorPool ) + , m_dispatcher( dispatcher ) + {} + + ~DescriptorSet() + { + if ( m_descriptorSet ) + { + getDispatcher()->vkFreeDescriptorSets( + m_device, m_descriptorPool, 1, reinterpret_cast( &m_descriptorSet ) ); + } + } + + DescriptorSet() = delete; + DescriptorSet( DescriptorSet const & ) = delete; + DescriptorSet( DescriptorSet && rhs ) + : m_descriptorSet( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorSet, {} ) ) + , m_device( rhs.m_device ) + , m_descriptorPool( rhs.m_descriptorPool ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DescriptorSet & operator=( DescriptorSet const & ) = delete; + DescriptorSet & operator =( DescriptorSet && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkFreeDescriptorSets( + m_device, m_descriptorPool, 1, reinterpret_cast( &m_descriptorSet ) ); + m_descriptorSet = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorSet, {} ); + m_device = rhs.m_device; + m_descriptorPool = rhs.m_descriptorPool; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void 
updateWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + const void * pData ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::DescriptorSet const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DescriptorSet m_descriptorSet; + VkDevice m_device; + VkDescriptorPool m_descriptorPool; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DescriptorSets : public std::vector + { + public: + DescriptorSets( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo const & allocateInfo ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector descriptorSets( allocateInfo.descriptorSetCount ); + VULKAN_HPP_NAMESPACE::Result result = static_cast( + dispatcher->vkAllocateDescriptorSets( static_cast( *device ), + reinterpret_cast( &allocateInfo ), + descriptorSets.data() ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + this->reserve( allocateInfo.descriptorSetCount ); + for ( auto const & descriptorSet : descriptorSets ) + { + this->emplace_back( descriptorSet, + static_cast( *device ), + static_cast( allocateInfo.descriptorPool ), + dispatcher ); + } + } + else + { + throwResultException( result, "vkAllocateDescriptorSets" ); + } + } + + DescriptorSets() = delete; + DescriptorSets( DescriptorSets const & ) = delete; + DescriptorSets( DescriptorSets && rhs ) = default; + DescriptorSets & operator=( DescriptorSets const & ) = delete; + DescriptorSets & operator=( DescriptorSets && rhs ) = default; + }; + + class DescriptorSetLayout + { + public: + using CType = VkDescriptorSetLayout; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorSetLayout; + static 
VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorSetLayout; + + public: + DescriptorSetLayout( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDescriptorSetLayout( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_descriptorSetLayout ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDescriptorSetLayout" ); + } + } + + DescriptorSetLayout( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkDescriptorSetLayout descriptorSetLayout, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_descriptorSetLayout( descriptorSetLayout ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~DescriptorSetLayout() + { + if ( m_descriptorSetLayout ) + { + getDispatcher()->vkDestroyDescriptorSetLayout( + m_device, static_cast( m_descriptorSetLayout ), m_allocator ); + } + } + + DescriptorSetLayout() = delete; + DescriptorSetLayout( DescriptorSetLayout const & ) = delete; + DescriptorSetLayout( DescriptorSetLayout && rhs ) + : m_descriptorSetLayout( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorSetLayout, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DescriptorSetLayout & operator=( DescriptorSetLayout const & ) = delete; + DescriptorSetLayout & operator =( DescriptorSetLayout && 
rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDescriptorSetLayout( + m_device, static_cast( m_descriptorSetLayout ), m_allocator ); + m_descriptorSetLayout = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorSetLayout, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::DescriptorSetLayout const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DescriptorSetLayout m_descriptorSetLayout; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DescriptorUpdateTemplate + { + public: + using CType = VkDescriptorUpdateTemplate; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorUpdateTemplate; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorUpdateTemplate; + + public: + DescriptorUpdateTemplate( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDescriptorUpdateTemplate( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_descriptorUpdateTemplate ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, 
"vkCreateDescriptorUpdateTemplate" ); + } + } + + DescriptorUpdateTemplate( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_descriptorUpdateTemplate( descriptorUpdateTemplate ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~DescriptorUpdateTemplate() + { + if ( m_descriptorUpdateTemplate ) + { + getDispatcher()->vkDestroyDescriptorUpdateTemplate( + m_device, static_cast( m_descriptorUpdateTemplate ), m_allocator ); + } + } + + DescriptorUpdateTemplate() = delete; + DescriptorUpdateTemplate( DescriptorUpdateTemplate const & ) = delete; + DescriptorUpdateTemplate( DescriptorUpdateTemplate && rhs ) + : m_descriptorUpdateTemplate( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorUpdateTemplate, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DescriptorUpdateTemplate & operator=( DescriptorUpdateTemplate const & ) = delete; + DescriptorUpdateTemplate & operator =( DescriptorUpdateTemplate && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyDescriptorUpdateTemplate( + m_device, static_cast( m_descriptorUpdateTemplate ), m_allocator ); + m_descriptorUpdateTemplate = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorUpdateTemplate, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate m_descriptorUpdateTemplate; + VkDevice m_device; + const 
VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DeviceMemory + { + public: + using CType = VkDeviceMemory; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDeviceMemory; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDeviceMemory; + + public: + DeviceMemory( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::MemoryAllocateInfo const & allocateInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkAllocateMemory( static_cast( *device ), + reinterpret_cast( &allocateInfo ), + m_allocator, + reinterpret_cast( &m_deviceMemory ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkAllocateMemory" ); + } + } + + DeviceMemory( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkDeviceMemory deviceMemory, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_deviceMemory( deviceMemory ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~DeviceMemory() + { + if ( m_deviceMemory ) + { + getDispatcher()->vkFreeMemory( m_device, static_cast( m_deviceMemory ), m_allocator ); + } + } + + DeviceMemory() = delete; + DeviceMemory( DeviceMemory const & ) = delete; + DeviceMemory( DeviceMemory && rhs ) + : m_deviceMemory( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_deviceMemory, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} 
+ DeviceMemory & operator=( DeviceMemory const & ) = delete; + DeviceMemory & operator =( DeviceMemory && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkFreeMemory( m_device, static_cast( m_deviceMemory ), m_allocator ); + m_deviceMemory = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_deviceMemory, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceSize getCommitment() const VULKAN_HPP_NOEXCEPT; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType ) const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD void * + mapMemory( VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + VULKAN_HPP_NAMESPACE::MemoryMapFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + void unmapMemory() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::DeviceMemory const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DeviceMemory m_deviceMemory; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DisplayKHR + { + public: + using CType = VkDisplayKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDisplayKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDisplayKHR; + + public: +# ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + DisplayKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + Display & dpy, + RROutput 
rrOutput ) + : m_physicalDevice( *physicalDevice ), m_dispatcher( physicalDevice.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetRandROutputDisplayEXT( static_cast( *physicalDevice ), + &dpy, + rrOutput, + reinterpret_cast( &m_displayKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkGetRandROutputDisplayEXT" ); + } + } +# endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + +# ifdef VK_USE_PLATFORM_WIN32_KHR + DisplayKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + uint32_t deviceRelativeId ) + : m_physicalDevice( *physicalDevice ), m_dispatcher( physicalDevice.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetWinrtDisplayNV( static_cast( *physicalDevice ), + deviceRelativeId, + reinterpret_cast( &m_displayKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkGetWinrtDisplayNV" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + DisplayKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + VkDisplayKHR displayKHR ) + : m_displayKHR( displayKHR ) + , m_physicalDevice( *physicalDevice ) + , m_dispatcher( physicalDevice.getDispatcher() ) + {} + + DisplayKHR( VkDisplayKHR displayKHR, + VkPhysicalDevice physicalDevice, + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : m_displayKHR( displayKHR ), m_physicalDevice( physicalDevice ), m_dispatcher( dispatcher ) + {} + + ~DisplayKHR() + { + if ( m_displayKHR ) + { + getDispatcher()->vkReleaseDisplayEXT( m_physicalDevice, static_cast( m_displayKHR ) ); + } + } + + DisplayKHR() = delete; + DisplayKHR( DisplayKHR const & ) = delete; + DisplayKHR( DisplayKHR && rhs ) + : m_displayKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_displayKHR, {} ) ) + , m_physicalDevice( rhs.m_physicalDevice ) + , m_dispatcher( 
rhs.m_dispatcher ) + {} + DisplayKHR & operator=( DisplayKHR const & ) = delete; + DisplayKHR & operator =( DisplayKHR && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkReleaseDisplayEXT( m_physicalDevice, static_cast( m_displayKHR ) ); + m_displayKHR = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_displayKHR, {} ); + m_physicalDevice = rhs.m_physicalDevice; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + void acquireWinrtNV() const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD std::vector getModeProperties2() const; + + VULKAN_HPP_NODISCARD std::vector getModeProperties() const; + + VULKAN_HPP_NAMESPACE::DisplayKHR const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_displayKHR; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DisplayKHR m_displayKHR; + VkPhysicalDevice m_physicalDevice; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class DisplayKHRs : public std::vector + { + public: + DisplayKHRs( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + uint32_t planeIndex ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = physicalDevice.getDispatcher(); + std::vector displays; + uint32_t displayCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( dispatcher->vkGetDisplayPlaneSupportedDisplaysKHR( + static_cast( *physicalDevice ), planeIndex, &displayCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && displayCount ) + { + displays.resize( displayCount ); + result = static_cast( dispatcher->vkGetDisplayPlaneSupportedDisplaysKHR( + static_cast( *physicalDevice ), planeIndex, &displayCount, displays.data() ) ); + VULKAN_HPP_ASSERT( displayCount <= displays.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( result == 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + this->reserve( displayCount ); + for ( auto const & displayKHR : displays ) + { + this->emplace_back( displayKHR, static_cast( *physicalDevice ), dispatcher ); + } + } + else + { + throwResultException( result, "vkGetDisplayPlaneSupportedDisplaysKHR" ); + } + } + + DisplayKHRs() = delete; + DisplayKHRs( DisplayKHRs const & ) = delete; + DisplayKHRs( DisplayKHRs && rhs ) = default; + DisplayKHRs & operator=( DisplayKHRs const & ) = delete; + DisplayKHRs & operator=( DisplayKHRs && rhs ) = default; + }; + + class DisplayModeKHR + { + public: + using CType = VkDisplayModeKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eDisplayModeKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDisplayModeKHR; + + public: + DisplayModeKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DisplayKHR const & display, + VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_physicalDevice( *physicalDevice ), m_dispatcher( physicalDevice.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDisplayModeKHR( + static_cast( *physicalDevice ), + static_cast( *display ), + reinterpret_cast( &createInfo ), + reinterpret_cast( + static_cast( allocator ) ), + reinterpret_cast( &m_displayModeKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDisplayModeKHR" ); + } + } + + DisplayModeKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, + VkDisplayModeKHR displayModeKHR ) + : m_displayModeKHR( displayModeKHR ) + , m_physicalDevice( *physicalDevice ) 
+ , m_dispatcher( physicalDevice.getDispatcher() ) + {} + + DisplayModeKHR() = delete; + DisplayModeKHR( DisplayModeKHR const & ) = delete; + DisplayModeKHR( DisplayModeKHR && rhs ) + : m_displayModeKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_displayModeKHR, {} ) ) + , m_dispatcher( rhs.m_dispatcher ) + {} + DisplayModeKHR & operator=( DisplayModeKHR const & ) = delete; + DisplayModeKHR & operator =( DisplayModeKHR && rhs ) + { + if ( this != &rhs ) + { + m_displayModeKHR = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_displayModeKHR, {} ); + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR + getDisplayPlaneCapabilities( uint32_t planeIndex ) const; + + VULKAN_HPP_NAMESPACE::DisplayModeKHR const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::DisplayModeKHR m_displayModeKHR; + VULKAN_HPP_NAMESPACE::PhysicalDevice m_physicalDevice; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Event + { + public: + using CType = VkEvent; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eEvent; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eEvent; + + public: + Event( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::EventCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateEvent( static_cast( *device 
), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_event ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateEvent" ); + } + } + + Event( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkEvent event, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_event( event ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~Event() + { + if ( m_event ) + { + getDispatcher()->vkDestroyEvent( m_device, static_cast( m_event ), m_allocator ); + } + } + + Event() = delete; + Event( Event const & ) = delete; + Event( Event && rhs ) + : m_event( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_event, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Event & operator=( Event const & ) = delete; + Event & operator =( Event && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyEvent( m_device, static_cast( m_event ), m_allocator ); + m_event = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_event, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result getStatus() const; + + void reset() const; + + void set() const; + + VULKAN_HPP_NAMESPACE::Event const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_event; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Event m_event; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Fence + { + public: + using CType = VkFence; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + 
VULKAN_HPP_NAMESPACE::ObjectType::eFence; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eFence; + + public: + Fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::FenceCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateFence( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_fence ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateFence" ); + } + } + + Fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT const & deviceEventInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkRegisterDeviceEventEXT( static_cast( *device ), + reinterpret_cast( &deviceEventInfo ), + m_allocator, + reinterpret_cast( &m_fence ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkRegisterDeviceEventEXT" ); + } + } + + Fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DisplayKHR const & display, + VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT const & displayEventInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + 
VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkRegisterDisplayEventEXT( + static_cast( *device ), + static_cast( *display ), + reinterpret_cast( &displayEventInfo ), + m_allocator, + reinterpret_cast( &m_fence ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkRegisterDisplayEventEXT" ); + } + } + + Fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkFence fence, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_fence( fence ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~Fence() + { + if ( m_fence ) + { + getDispatcher()->vkDestroyFence( m_device, static_cast( m_fence ), m_allocator ); + } + } + + Fence() = delete; + Fence( Fence const & ) = delete; + Fence( Fence && rhs ) + : m_fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_fence, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Fence & operator=( Fence const & ) = delete; + Fence & operator =( Fence && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyFence( m_device, static_cast( m_fence ), m_allocator ); + m_fence = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_fence, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result getStatus() const; + + VULKAN_HPP_NAMESPACE::Fence const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_fence; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Fence m_fence; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Framebuffer + { + public: + 
using CType = VkFramebuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eFramebuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eFramebuffer; + + public: + Framebuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::FramebufferCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateFramebuffer( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_framebuffer ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateFramebuffer" ); + } + } + + Framebuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkFramebuffer framebuffer, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_framebuffer( framebuffer ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~Framebuffer() + { + if ( m_framebuffer ) + { + getDispatcher()->vkDestroyFramebuffer( m_device, static_cast( m_framebuffer ), m_allocator ); + } + } + + Framebuffer() = delete; + Framebuffer( Framebuffer const & ) = delete; + Framebuffer( Framebuffer && rhs ) + : m_framebuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_framebuffer, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Framebuffer & operator=( Framebuffer const & ) = delete; + Framebuffer & operator =( Framebuffer && rhs ) + { + if ( this != &rhs ) + { + 
getDispatcher()->vkDestroyFramebuffer( m_device, static_cast( m_framebuffer ), m_allocator ); + m_framebuffer = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_framebuffer, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::Framebuffer const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Framebuffer m_framebuffer; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Image + { + public: + using CType = VkImage; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eImage; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eImage; + + public: + Image( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::ImageCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateImage( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_image ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateImage" ); + } + } + + Image( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkImage image, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_image( image ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) 
) + , m_dispatcher( device.getDispatcher() ) + {} + + ~Image() + { + if ( m_image ) + { + getDispatcher()->vkDestroyImage( m_device, static_cast( m_image ), m_allocator ); + } + } + + Image() = delete; + Image( Image const & ) = delete; + Image( Image && rhs ) + : m_image( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_image, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Image & operator=( Image const & ) = delete; + Image & operator =( Image && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyImage( m_device, static_cast( m_image ), m_allocator ); + m_image = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_image, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void bindMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT + getDrmFormatModifierPropertiesEXT() const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements getMemoryRequirements() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getSparseMemoryRequirements() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout + getSubresourceLayout( const ImageSubresource & subresource ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::Image const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_image; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Image m_image; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class ImageView + { + public: + using CType = VkImageView; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType 
objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eImageView; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eImageView; + + public: + ImageView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::ImageViewCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateImageView( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_imageView ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateImageView" ); + } + } + + ImageView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkImageView imageView, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_imageView( imageView ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~ImageView() + { + if ( m_imageView ) + { + getDispatcher()->vkDestroyImageView( m_device, static_cast( m_imageView ), m_allocator ); + } + } + + ImageView() = delete; + ImageView( ImageView const & ) = delete; + ImageView( ImageView && rhs ) + : m_imageView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_imageView, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + ImageView & operator=( ImageView const & ) = delete; + ImageView & operator =( ImageView && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyImageView( m_device, static_cast( m_imageView ), m_allocator ); + m_imageView = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( 
rhs.m_imageView, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX getAddressNVX() const; + + VULKAN_HPP_NAMESPACE::ImageView const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_imageView; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::ImageView m_imageView; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class IndirectCommandsLayoutNV + { + public: + using CType = VkIndirectCommandsLayoutNV; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eIndirectCommandsLayoutNV; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + IndirectCommandsLayoutNV( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateIndirectCommandsLayoutNV( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_indirectCommandsLayoutNV ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateIndirectCommandsLayoutNV" ); + } + } + + IndirectCommandsLayoutNV( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkIndirectCommandsLayoutNV indirectCommandsLayoutNV, + 
VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_indirectCommandsLayoutNV( indirectCommandsLayoutNV ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~IndirectCommandsLayoutNV() + { + if ( m_indirectCommandsLayoutNV ) + { + getDispatcher()->vkDestroyIndirectCommandsLayoutNV( + m_device, static_cast( m_indirectCommandsLayoutNV ), m_allocator ); + } + } + + IndirectCommandsLayoutNV() = delete; + IndirectCommandsLayoutNV( IndirectCommandsLayoutNV const & ) = delete; + IndirectCommandsLayoutNV( IndirectCommandsLayoutNV && rhs ) + : m_indirectCommandsLayoutNV( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_indirectCommandsLayoutNV, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + IndirectCommandsLayoutNV & operator=( IndirectCommandsLayoutNV const & ) = delete; + IndirectCommandsLayoutNV & operator =( IndirectCommandsLayoutNV && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyIndirectCommandsLayoutNV( + m_device, static_cast( m_indirectCommandsLayoutNV ), m_allocator ); + m_indirectCommandsLayoutNV = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_indirectCommandsLayoutNV, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV m_indirectCommandsLayoutNV; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class PerformanceConfigurationINTEL + { + public: + using CType = VkPerformanceConfigurationINTEL; + + 
static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::ePerformanceConfigurationINTEL; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + PerformanceConfigurationINTEL( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL const & acquireInfo ) + : m_device( *device ), m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkAcquirePerformanceConfigurationINTEL( + static_cast( *device ), + reinterpret_cast( &acquireInfo ), + reinterpret_cast( &m_performanceConfigurationINTEL ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkAcquirePerformanceConfigurationINTEL" ); + } + } + + PerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkPerformanceConfigurationINTEL performanceConfigurationINTEL ) + : m_performanceConfigurationINTEL( performanceConfigurationINTEL ) + , m_device( *device ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~PerformanceConfigurationINTEL() + { + if ( m_performanceConfigurationINTEL ) + { + getDispatcher()->vkReleasePerformanceConfigurationINTEL( + m_device, static_cast( m_performanceConfigurationINTEL ) ); + } + } + + PerformanceConfigurationINTEL() = delete; + PerformanceConfigurationINTEL( PerformanceConfigurationINTEL const & ) = delete; + PerformanceConfigurationINTEL( PerformanceConfigurationINTEL && rhs ) + : m_performanceConfigurationINTEL( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_performanceConfigurationINTEL, {} ) ) + , m_device( rhs.m_device ) + , m_dispatcher( rhs.m_dispatcher ) + {} + PerformanceConfigurationINTEL & operator=( PerformanceConfigurationINTEL const 
& ) = delete; + PerformanceConfigurationINTEL & operator=( PerformanceConfigurationINTEL && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkReleasePerformanceConfigurationINTEL( + m_device, static_cast( m_performanceConfigurationINTEL ) ); + m_performanceConfigurationINTEL = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_performanceConfigurationINTEL, {} ); + m_device = rhs.m_device; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL m_performanceConfigurationINTEL; + VkDevice m_device; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class PipelineCache + { + public: + using CType = VkPipelineCache; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::ePipelineCache; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipelineCache; + + public: + PipelineCache( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreatePipelineCache( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_pipelineCache ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreatePipelineCache" ); + } + } + 
+ PipelineCache( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkPipelineCache pipelineCache, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_pipelineCache( pipelineCache ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~PipelineCache() + { + if ( m_pipelineCache ) + { + getDispatcher()->vkDestroyPipelineCache( + m_device, static_cast( m_pipelineCache ), m_allocator ); + } + } + + PipelineCache() = delete; + PipelineCache( PipelineCache const & ) = delete; + PipelineCache( PipelineCache && rhs ) + : m_pipelineCache( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipelineCache, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + PipelineCache & operator=( PipelineCache const & ) = delete; + PipelineCache & operator =( PipelineCache && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyPipelineCache( + m_device, static_cast( m_pipelineCache ), m_allocator ); + m_pipelineCache = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipelineCache, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD std::vector getData() const; + + void merge( ArrayProxy const & srcCaches ) const; + + VULKAN_HPP_NAMESPACE::PipelineCache const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::PipelineCache m_pipelineCache; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Pipeline + { + public: + using CType = VkPipeline; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + 
VULKAN_HPP_NAMESPACE::ObjectType::ePipeline; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipeline; + + public: + Pipeline( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + m_constructorSuccessCode = static_cast( getDispatcher()->vkCreateComputePipelines( + static_cast( *device ), + pipelineCache ? static_cast( **pipelineCache ) : 0, + 1, + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_pipeline ) ) ); + if ( ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + throwResultException( m_constructorSuccessCode, "vkCreateComputePipelines" ); + } + } + + Pipeline( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + m_constructorSuccessCode = + static_cast( getDispatcher()->vkCreateGraphicsPipelines( + static_cast( *device ), + pipelineCache ? 
static_cast( **pipelineCache ) : 0, + 1, + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_pipeline ) ) ); + if ( ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + throwResultException( m_constructorSuccessCode, "vkCreateGraphicsPipelines" ); + } + } + + Pipeline( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional< + const VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DeferredOperationKHR> const & deferredOperation, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + m_constructorSuccessCode = + static_cast( getDispatcher()->vkCreateRayTracingPipelinesKHR( + static_cast( *device ), + deferredOperation ? static_cast( **deferredOperation ) : 0, + pipelineCache ? 
static_cast( **pipelineCache ) : 0, + 1, + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_pipeline ) ) ); + if ( ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) && + ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) && + ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + throwResultException( m_constructorSuccessCode, "vkCreateRayTracingPipelinesKHR" ); + } + } + + Pipeline( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + m_constructorSuccessCode = + static_cast( getDispatcher()->vkCreateRayTracingPipelinesNV( + static_cast( *device ), + pipelineCache ? 
static_cast( **pipelineCache ) : 0, + 1, + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_pipeline ) ) ); + if ( ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( m_constructorSuccessCode != VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + throwResultException( m_constructorSuccessCode, "vkCreateRayTracingPipelinesNV" ); + } + } + + Pipeline( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkPipeline pipeline, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_pipeline( pipeline ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + Pipeline( VkPipeline pipeline, + VkDevice device, + VkAllocationCallbacks const * allocator, + VULKAN_HPP_NAMESPACE::Result successCode, + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : m_pipeline( pipeline ) + , m_device( device ) + , m_allocator( allocator ) + , m_constructorSuccessCode( successCode ) + , m_dispatcher( dispatcher ) + {} + + ~Pipeline() + { + if ( m_pipeline ) + { + getDispatcher()->vkDestroyPipeline( m_device, static_cast( m_pipeline ), m_allocator ); + } + } + + Pipeline() = delete; + Pipeline( Pipeline const & ) = delete; + Pipeline( Pipeline && rhs ) + : m_pipeline( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipeline, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Pipeline & operator=( Pipeline const & ) = delete; + Pipeline & operator =( Pipeline && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyPipeline( m_device, static_cast( m_pipeline ), m_allocator ); + m_pipeline = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipeline, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + void compileDeferredNV( uint32_t 
shader ) const; + + template + VULKAN_HPP_NODISCARD std::vector getRayTracingCaptureReplayShaderGroupHandlesKHR( uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize ) const; + + template + VULKAN_HPP_NODISCARD T getRayTracingCaptureReplayShaderGroupHandleKHR( uint32_t firstGroup, + uint32_t groupCount ) const; + + template + VULKAN_HPP_NODISCARD std::vector + getRayTracingShaderGroupHandlesKHR( uint32_t firstGroup, uint32_t groupCount, size_t dataSize ) const; + + template + VULKAN_HPP_NODISCARD T getRayTracingShaderGroupHandleKHR( uint32_t firstGroup, uint32_t groupCount ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceSize getRayTracingShaderGroupStackSizeKHR( + uint32_t group, VULKAN_HPP_NAMESPACE::ShaderGroupShaderKHR groupShader ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getShaderInfoAMD( VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, + VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType ) const; + + VULKAN_HPP_NAMESPACE::Pipeline const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_pipeline; + } + + VULKAN_HPP_NAMESPACE::Result getConstructorSuccessCode() const + { + return m_constructorSuccessCode; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Pipeline m_pipeline; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_NAMESPACE::Result m_constructorSuccessCode; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Pipelines : public std::vector + { + public: + Pipelines( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::ArrayProxy const & createInfos, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector pipelines( createInfos.size() ); + VULKAN_HPP_NAMESPACE::Result 
result = + static_cast( dispatcher->vkCreateComputePipelines( + static_cast( *device ), + pipelineCache ? static_cast( **pipelineCache ) : 0, + createInfos.size(), + reinterpret_cast( createInfos.data() ), + reinterpret_cast( + static_cast( allocator ) ), + pipelines.data() ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || + ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + this->reserve( createInfos.size() ); + for ( auto const & pipeline : pipelines ) + { + this->emplace_back( pipeline, + static_cast( *device ), + reinterpret_cast( + static_cast( allocator ) ), + result, + dispatcher ); + } + } + else + { + throwResultException( result, "vkCreateComputePipelines" ); + } + } + + Pipelines( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::ArrayProxy const & createInfos, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector pipelines( createInfos.size() ); + VULKAN_HPP_NAMESPACE::Result result = + static_cast( dispatcher->vkCreateGraphicsPipelines( + static_cast( *device ), + pipelineCache ? 
static_cast( **pipelineCache ) : 0, + createInfos.size(), + reinterpret_cast( createInfos.data() ), + reinterpret_cast( + static_cast( allocator ) ), + pipelines.data() ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || + ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + this->reserve( createInfos.size() ); + for ( auto const & pipeline : pipelines ) + { + this->emplace_back( pipeline, + static_cast( *device ), + reinterpret_cast( + static_cast( allocator ) ), + result, + dispatcher ); + } + } + else + { + throwResultException( result, "vkCreateGraphicsPipelines" ); + } + } + + Pipelines( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional< + const VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DeferredOperationKHR> const & deferredOperation, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::ArrayProxy const & createInfos, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector pipelines( createInfos.size() ); + VULKAN_HPP_NAMESPACE::Result result = + static_cast( dispatcher->vkCreateRayTracingPipelinesKHR( + static_cast( *device ), + deferredOperation ? static_cast( **deferredOperation ) : 0, + pipelineCache ? 
static_cast( **pipelineCache ) : 0, + createInfos.size(), + reinterpret_cast( createInfos.data() ), + reinterpret_cast( + static_cast( allocator ) ), + pipelines.data() ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || + ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) || + ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) || + ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + this->reserve( createInfos.size() ); + for ( auto const & pipeline : pipelines ) + { + this->emplace_back( pipeline, + static_cast( *device ), + reinterpret_cast( + static_cast( allocator ) ), + result, + dispatcher ); + } + } + else + { + throwResultException( result, "vkCreateRayTracingPipelinesKHR" ); + } + } + + Pipelines( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::Optional const & + pipelineCache, + VULKAN_HPP_NAMESPACE::ArrayProxy const & createInfos, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector pipelines( createInfos.size() ); + VULKAN_HPP_NAMESPACE::Result result = + static_cast( dispatcher->vkCreateRayTracingPipelinesNV( + static_cast( *device ), + pipelineCache ? 
static_cast( **pipelineCache ) : 0, + createInfos.size(), + reinterpret_cast( createInfos.data() ), + reinterpret_cast( + static_cast( allocator ) ), + pipelines.data() ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || + ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + this->reserve( createInfos.size() ); + for ( auto const & pipeline : pipelines ) + { + this->emplace_back( pipeline, + static_cast( *device ), + reinterpret_cast( + static_cast( allocator ) ), + result, + dispatcher ); + } + } + else + { + throwResultException( result, "vkCreateRayTracingPipelinesNV" ); + } + } + + Pipelines() = delete; + Pipelines( Pipelines const & ) = delete; + Pipelines( Pipelines && rhs ) = default; + Pipelines & operator=( Pipelines const & ) = delete; + Pipelines & operator=( Pipelines && rhs ) = default; + }; + + class PipelineLayout + { + public: + using CType = VkPipelineLayout; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::ePipelineLayout; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipelineLayout; + + public: + PipelineLayout( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreatePipelineLayout( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_pipelineLayout ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreatePipelineLayout" ); + } + } + + PipelineLayout( + 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkPipelineLayout pipelineLayout, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_pipelineLayout( pipelineLayout ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~PipelineLayout() + { + if ( m_pipelineLayout ) + { + getDispatcher()->vkDestroyPipelineLayout( + m_device, static_cast( m_pipelineLayout ), m_allocator ); + } + } + + PipelineLayout() = delete; + PipelineLayout( PipelineLayout const & ) = delete; + PipelineLayout( PipelineLayout && rhs ) + : m_pipelineLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipelineLayout, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + PipelineLayout & operator=( PipelineLayout const & ) = delete; + PipelineLayout & operator =( PipelineLayout && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyPipelineLayout( + m_device, static_cast( m_pipelineLayout ), m_allocator ); + m_pipelineLayout = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipelineLayout, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::PipelineLayout const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::PipelineLayout m_pipelineLayout; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class PrivateDataSlotEXT + { + public: + using CType = VkPrivateDataSlotEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::ePrivateDataSlotEXT; + static 
VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + PrivateDataSlotEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreatePrivateDataSlotEXT( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_privateDataSlotEXT ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreatePrivateDataSlotEXT" ); + } + } + + PrivateDataSlotEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkPrivateDataSlotEXT privateDataSlotEXT, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_privateDataSlotEXT( privateDataSlotEXT ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~PrivateDataSlotEXT() + { + if ( m_privateDataSlotEXT ) + { + getDispatcher()->vkDestroyPrivateDataSlotEXT( + m_device, static_cast( m_privateDataSlotEXT ), m_allocator ); + } + } + + PrivateDataSlotEXT() = delete; + PrivateDataSlotEXT( PrivateDataSlotEXT const & ) = delete; + PrivateDataSlotEXT( PrivateDataSlotEXT && rhs ) + : m_privateDataSlotEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_privateDataSlotEXT, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + PrivateDataSlotEXT & operator=( PrivateDataSlotEXT const & ) = delete; + PrivateDataSlotEXT & operator =( PrivateDataSlotEXT && rhs ) + { + if ( this != &rhs ) + { + 
getDispatcher()->vkDestroyPrivateDataSlotEXT( + m_device, static_cast( m_privateDataSlotEXT ), m_allocator ); + m_privateDataSlotEXT = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_privateDataSlotEXT, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT m_privateDataSlotEXT; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class QueryPool + { + public: + using CType = VkQueryPool; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eQueryPool; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueryPool; + + public: + QueryPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateQueryPool( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_queryPool ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateQueryPool" ); + } + } + + QueryPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkQueryPool queryPool, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) 
+ : m_queryPool( queryPool ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~QueryPool() + { + if ( m_queryPool ) + { + getDispatcher()->vkDestroyQueryPool( m_device, static_cast( m_queryPool ), m_allocator ); + } + } + + QueryPool() = delete; + QueryPool( QueryPool const & ) = delete; + QueryPool( QueryPool && rhs ) + : m_queryPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_queryPool, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + QueryPool & operator=( QueryPool const & ) = delete; + QueryPool & operator =( QueryPool && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyQueryPool( m_device, static_cast( m_queryPool ), m_allocator ); + m_queryPool = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_queryPool, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + template + VULKAN_HPP_NODISCARD std::pair> + getResults( uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + VULKAN_HPP_NAMESPACE::DeviceSize stride, + VULKAN_HPP_NAMESPACE::QueryResultFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + template + VULKAN_HPP_NODISCARD std::pair + getResult( uint32_t firstQuery, + uint32_t queryCount, + VULKAN_HPP_NAMESPACE::DeviceSize stride, + VULKAN_HPP_NAMESPACE::QueryResultFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + void reset( uint32_t firstQuery, uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::QueryPool const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_queryPool; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::QueryPool m_queryPool; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + 
VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Queue + { + public: + using CType = VkQueue; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eQueue; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueue; + + public: + Queue( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + uint32_t queueFamilyIndex, + uint32_t queueIndex ) + : m_dispatcher( device.getDispatcher() ) + { + getDispatcher()->vkGetDeviceQueue( + static_cast( *device ), queueFamilyIndex, queueIndex, reinterpret_cast( &m_queue ) ); + } + + Queue( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::DeviceQueueInfo2 const & queueInfo ) + : m_dispatcher( device.getDispatcher() ) + { + getDispatcher()->vkGetDeviceQueue2( static_cast( *device ), + reinterpret_cast( &queueInfo ), + reinterpret_cast( &m_queue ) ); + } + + Queue( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkQueue queue ) + : m_queue( queue ), m_dispatcher( device.getDispatcher() ) + {} + + Queue() = delete; + Queue( Queue const & ) = delete; + Queue( Queue && rhs ) + : m_queue( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_queue, {} ) ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Queue & operator=( Queue const & ) = delete; + Queue & operator =( Queue && rhs ) + { + if ( this != &rhs ) + { + m_queue = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_queue, {} ); + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD std::vector + getCheckpointData2NV() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD std::vector + getCheckpointDataNV() const VULKAN_HPP_NOEXCEPT; + + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT; + + void 
bindSparse( ArrayProxy const & bindInfo, + VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + void endDebugUtilsLabelEXT() const VULKAN_HPP_NOEXCEPT; + + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result presentKHR( const PresentInfoKHR & presentInfo ) const; + + void setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration ) const; + + void submit( ArrayProxy const & submits, + VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + void submit2KHR( ArrayProxy const & submits, + VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + void waitIdle() const; + + VULKAN_HPP_NAMESPACE::Queue const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_queue; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Queue m_queue; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class RenderPass + { + public: + using CType = VkRenderPass; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eRenderPass; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eRenderPass; + + public: + RenderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::RenderPassCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateRenderPass( static_cast( *device ), + reinterpret_cast( &createInfo ), + 
m_allocator, + reinterpret_cast( &m_renderPass ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateRenderPass" ); + } + } + + RenderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2 const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateRenderPass2( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_renderPass ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateRenderPass2" ); + } + } + + RenderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkRenderPass renderPass, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_renderPass( renderPass ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~RenderPass() + { + if ( m_renderPass ) + { + getDispatcher()->vkDestroyRenderPass( m_device, static_cast( m_renderPass ), m_allocator ); + } + } + + RenderPass() = delete; + RenderPass( RenderPass const & ) = delete; + RenderPass( RenderPass && rhs ) + : m_renderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_renderPass, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + RenderPass & operator=( RenderPass const & ) = delete; + RenderPass & operator =( RenderPass && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyRenderPass( m_device, static_cast( m_renderPass ), m_allocator ); + m_renderPass = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_renderPass, {} ); + 
m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D getRenderAreaGranularity() const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::RenderPass const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_renderPass; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::RenderPass m_renderPass; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Sampler + { + public: + using CType = VkSampler; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eSampler; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSampler; + + public: + Sampler( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::SamplerCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateSampler( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_sampler ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateSampler" ); + } + } + + Sampler( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkSampler sampler, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_sampler( sampler ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + 
~Sampler() + { + if ( m_sampler ) + { + getDispatcher()->vkDestroySampler( m_device, static_cast( m_sampler ), m_allocator ); + } + } + + Sampler() = delete; + Sampler( Sampler const & ) = delete; + Sampler( Sampler && rhs ) + : m_sampler( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_sampler, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Sampler & operator=( Sampler const & ) = delete; + Sampler & operator =( Sampler && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroySampler( m_device, static_cast( m_sampler ), m_allocator ); + m_sampler = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_sampler, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::Sampler const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_sampler; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Sampler m_sampler; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class SamplerYcbcrConversion + { + public: + using CType = VkSamplerYcbcrConversion; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eSamplerYcbcrConversion; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSamplerYcbcrConversion; + + public: + SamplerYcbcrConversion( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , 
m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateSamplerYcbcrConversion( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_samplerYcbcrConversion ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateSamplerYcbcrConversion" ); + } + } + + SamplerYcbcrConversion( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkSamplerYcbcrConversion samplerYcbcrConversion, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_samplerYcbcrConversion( samplerYcbcrConversion ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~SamplerYcbcrConversion() + { + if ( m_samplerYcbcrConversion ) + { + getDispatcher()->vkDestroySamplerYcbcrConversion( + m_device, static_cast( m_samplerYcbcrConversion ), m_allocator ); + } + } + + SamplerYcbcrConversion() = delete; + SamplerYcbcrConversion( SamplerYcbcrConversion const & ) = delete; + SamplerYcbcrConversion( SamplerYcbcrConversion && rhs ) + : m_samplerYcbcrConversion( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_samplerYcbcrConversion, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + SamplerYcbcrConversion & operator=( SamplerYcbcrConversion const & ) = delete; + SamplerYcbcrConversion & operator =( SamplerYcbcrConversion && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroySamplerYcbcrConversion( + m_device, static_cast( m_samplerYcbcrConversion ), m_allocator ); + m_samplerYcbcrConversion = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_samplerYcbcrConversion, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + 
VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion m_samplerYcbcrConversion; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class Semaphore + { + public: + using CType = VkSemaphore; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eSemaphore; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSemaphore; + + public: + Semaphore( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateSemaphore( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_semaphore ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateSemaphore" ); + } + } + + Semaphore( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkSemaphore semaphore, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_semaphore( semaphore ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~Semaphore() + { + if ( m_semaphore ) + { + getDispatcher()->vkDestroySemaphore( m_device, static_cast( m_semaphore ), m_allocator ); + } + } + + Semaphore() = 
delete; + Semaphore( Semaphore const & ) = delete; + Semaphore( Semaphore && rhs ) + : m_semaphore( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_semaphore, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + Semaphore & operator=( Semaphore const & ) = delete; + Semaphore & operator =( Semaphore && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroySemaphore( m_device, static_cast( m_semaphore ), m_allocator ); + m_semaphore = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_semaphore, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD uint64_t getCounterValue() const; + + VULKAN_HPP_NAMESPACE::Semaphore const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_semaphore; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::Semaphore m_semaphore; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class ShaderModule + { + public: + using CType = VkShaderModule; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eShaderModule; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eShaderModule; + + public: + ShaderModule( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + 
getDispatcher()->vkCreateShaderModule( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_shaderModule ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateShaderModule" ); + } + } + + ShaderModule( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkShaderModule shaderModule, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_shaderModule( shaderModule ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~ShaderModule() + { + if ( m_shaderModule ) + { + getDispatcher()->vkDestroyShaderModule( + m_device, static_cast( m_shaderModule ), m_allocator ); + } + } + + ShaderModule() = delete; + ShaderModule( ShaderModule const & ) = delete; + ShaderModule( ShaderModule && rhs ) + : m_shaderModule( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_shaderModule, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + ShaderModule & operator=( ShaderModule const & ) = delete; + ShaderModule & operator =( ShaderModule && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyShaderModule( + m_device, static_cast( m_shaderModule ), m_allocator ); + m_shaderModule = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_shaderModule, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::ShaderModule const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::ShaderModule m_shaderModule; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + 
class SurfaceKHR + { + public: + using CType = VkSurfaceKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eSurfaceKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSurfaceKHR; + + public: +# ifdef VK_USE_PLATFORM_ANDROID_KHR + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateAndroidSurfaceKHR( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateAndroidSurfaceKHR" ); + } + } +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +# ifdef VK_USE_PLATFORM_DIRECTFB_EXT + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDirectFBSurfaceEXT( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDirectFBSurfaceEXT" ); + } + } +# endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + SurfaceKHR( 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateDisplayPlaneSurfaceKHR( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateDisplayPlaneSurfaceKHR" ); + } + } + + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateHeadlessSurfaceEXT( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateHeadlessSurfaceEXT" ); + } + } + +# ifdef VK_USE_PLATFORM_IOS_MVK + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateIOSSurfaceMVK( static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + 
reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateIOSSurfaceMVK" ); + } + } +# endif /*VK_USE_PLATFORM_IOS_MVK*/ + +# ifdef VK_USE_PLATFORM_FUCHSIA + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateImagePipeSurfaceFUCHSIA( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateImagePipeSurfaceFUCHSIA" ); + } + } +# endif /*VK_USE_PLATFORM_FUCHSIA*/ + +# ifdef VK_USE_PLATFORM_MACOS_MVK + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateMacOSSurfaceMVK( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateMacOSSurfaceMVK" ); + } + } +# endif /*VK_USE_PLATFORM_MACOS_MVK*/ + +# ifdef VK_USE_PLATFORM_METAL_EXT + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT const & createInfo, + 
VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateMetalSurfaceEXT( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateMetalSurfaceEXT" ); + } + } +# endif /*VK_USE_PLATFORM_METAL_EXT*/ + +# ifdef VK_USE_PLATFORM_GGP + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateStreamDescriptorSurfaceGGP( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateStreamDescriptorSurfaceGGP" ); + } + } +# endif /*VK_USE_PLATFORM_GGP*/ + +# ifdef VK_USE_PLATFORM_VI_NN + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateViSurfaceNN( static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateViSurfaceNN" ); + } + } +# endif /*VK_USE_PLATFORM_VI_NN*/ + +# ifdef VK_USE_PLATFORM_WAYLAND_KHR + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateWaylandSurfaceKHR( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateWaylandSurfaceKHR" ); + } + } +# endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +# ifdef VK_USE_PLATFORM_WIN32_KHR + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateWin32SurfaceKHR( + static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateWin32SurfaceKHR" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +# ifdef VK_USE_PLATFORM_XCB_KHR + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , 
m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateXcbSurfaceKHR( static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateXcbSurfaceKHR" ); + } + } +# endif /*VK_USE_PLATFORM_XCB_KHR*/ + +# ifdef VK_USE_PLATFORM_XLIB_KHR + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateXlibSurfaceKHR( static_cast( *instance ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_surfaceKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateXlibSurfaceKHR" ); + } + } +# endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, + VkSurfaceKHR surfaceKHR, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_surfaceKHR( surfaceKHR ) + , m_instance( *instance ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( instance.getDispatcher() ) + {} + + ~SurfaceKHR() + { + if ( m_surfaceKHR ) + { + getDispatcher()->vkDestroySurfaceKHR( m_instance, static_cast( m_surfaceKHR ), m_allocator ); + } + } + + SurfaceKHR() = delete; + SurfaceKHR( SurfaceKHR const & ) = delete; + SurfaceKHR( SurfaceKHR && rhs ) + : m_surfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_surfaceKHR, {} ) ) + , m_instance( 
rhs.m_instance ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + SurfaceKHR & operator=( SurfaceKHR const & ) = delete; + SurfaceKHR & operator =( SurfaceKHR && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroySurfaceKHR( m_instance, static_cast( m_surfaceKHR ), m_allocator ); + m_surfaceKHR = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_surfaceKHR, {} ); + m_instance = rhs.m_instance; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NAMESPACE::SurfaceKHR const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::SurfaceKHR m_surfaceKHR; + VkInstance m_instance; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class SwapchainKHR + { + public: + using CType = VkSwapchainKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eSwapchainKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSwapchainKHR; + + public: + SwapchainKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCreateSwapchainKHR( static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_swapchainKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, 
"vkCreateSwapchainKHR" ); + } + } + + SwapchainKHR( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkSwapchainKHR swapchainKHR, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_swapchainKHR( swapchainKHR ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + SwapchainKHR( VkSwapchainKHR swapchainKHR, + VkDevice device, + VkAllocationCallbacks const * allocator, + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher ) + : m_swapchainKHR( swapchainKHR ), m_device( device ), m_allocator( allocator ), m_dispatcher( dispatcher ) + {} + + ~SwapchainKHR() + { + if ( m_swapchainKHR ) + { + getDispatcher()->vkDestroySwapchainKHR( + m_device, static_cast( m_swapchainKHR ), m_allocator ); + } + } + + SwapchainKHR() = delete; + SwapchainKHR( SwapchainKHR const & ) = delete; + SwapchainKHR( SwapchainKHR && rhs ) + : m_swapchainKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_swapchainKHR, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + SwapchainKHR & operator=( SwapchainKHR const & ) = delete; + SwapchainKHR & operator =( SwapchainKHR && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroySwapchainKHR( + m_device, static_cast( m_swapchainKHR ), m_allocator ); + m_swapchainKHR = VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_swapchainKHR, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + void acquireFullScreenExclusiveModeEXT() const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD std::pair + acquireNextImage( uint64_t timeout, + VULKAN_HPP_NAMESPACE::Semaphore semaphore VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, + VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT ) const; + + 
VULKAN_HPP_NODISCARD std::vector + getPastPresentationTimingGOOGLE() const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE getRefreshCycleDurationGOOGLE() const; + + VULKAN_HPP_NODISCARD uint64_t getCounterEXT( VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter ) const; + + VULKAN_HPP_NODISCARD std::vector getImages() const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result getStatus() const; + +# ifdef VK_USE_PLATFORM_WIN32_KHR + void releaseFullScreenExclusiveModeEXT() const; +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + void setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NAMESPACE::SwapchainKHR const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::SwapchainKHR m_swapchainKHR; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + class SwapchainKHRs : public std::vector + { + public: + SwapchainKHRs( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::ArrayProxy const & createInfos, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + { + VULKAN_HPP_RAII_DISPATCHER_TYPE const * dispatcher = device.getDispatcher(); + std::vector swapchains( createInfos.size() ); + VULKAN_HPP_NAMESPACE::Result result = + static_cast( dispatcher->vkCreateSharedSwapchainsKHR( + static_cast( *device ), + createInfos.size(), + reinterpret_cast( createInfos.data() ), + reinterpret_cast( + static_cast( allocator ) ), + swapchains.data() ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + this->reserve( createInfos.size() ); + for ( auto const & swapchainKHR : swapchains ) + { + this->emplace_back( swapchainKHR, + static_cast( *device ), + reinterpret_cast( + static_cast( allocator ) ), + 
dispatcher ); + } + } + else + { + throwResultException( result, "vkCreateSharedSwapchainsKHR" ); + } + } + + SwapchainKHRs() = delete; + SwapchainKHRs( SwapchainKHRs const & ) = delete; + SwapchainKHRs( SwapchainKHRs && rhs ) = default; + SwapchainKHRs & operator=( SwapchainKHRs const & ) = delete; + SwapchainKHRs & operator=( SwapchainKHRs && rhs ) = default; + }; + + class ValidationCacheEXT + { + public: + using CType = VkValidationCacheEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = + VULKAN_HPP_NAMESPACE::ObjectType::eValidationCacheEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eValidationCacheEXT; + + public: + ValidationCacheEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCreateValidationCacheEXT( + static_cast( *device ), + reinterpret_cast( &createInfo ), + m_allocator, + reinterpret_cast( &m_validationCacheEXT ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, "vkCreateValidationCacheEXT" ); + } + } + + ValidationCacheEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, + VkValidationCacheEXT validationCacheEXT, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) + : m_validationCacheEXT( validationCacheEXT ) + , m_device( *device ) + , m_allocator( reinterpret_cast( + static_cast( allocator ) ) ) + , m_dispatcher( device.getDispatcher() ) + {} + + ~ValidationCacheEXT() + { + if ( m_validationCacheEXT ) + { + getDispatcher()->vkDestroyValidationCacheEXT( 
+ m_device, static_cast( m_validationCacheEXT ), m_allocator ); + } + } + + ValidationCacheEXT() = delete; + ValidationCacheEXT( ValidationCacheEXT const & ) = delete; + ValidationCacheEXT( ValidationCacheEXT && rhs ) + : m_validationCacheEXT( + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_validationCacheEXT, {} ) ) + , m_device( rhs.m_device ) + , m_allocator( rhs.m_allocator ) + , m_dispatcher( rhs.m_dispatcher ) + {} + ValidationCacheEXT & operator=( ValidationCacheEXT const & ) = delete; + ValidationCacheEXT & operator =( ValidationCacheEXT && rhs ) + { + if ( this != &rhs ) + { + getDispatcher()->vkDestroyValidationCacheEXT( + m_device, static_cast( m_validationCacheEXT ), m_allocator ); + m_validationCacheEXT = + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_validationCacheEXT, {} ); + m_device = rhs.m_device; + m_allocator = rhs.m_allocator; + m_dispatcher = rhs.m_dispatcher; + } + return *this; + } + + VULKAN_HPP_NODISCARD std::vector getData() const; + + void merge( ArrayProxy const & srcCaches ) const; + + VULKAN_HPP_NAMESPACE::ValidationCacheEXT const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT; + } + + VULKAN_HPP_RAII_DISPATCHER_TYPE const * getDispatcher() const + { + return m_dispatcher; + } + + private: + VULKAN_HPP_NAMESPACE::ValidationCacheEXT m_validationCacheEXT; + VkDevice m_device; + const VkAllocationCallbacks * m_allocator; + VULKAN_HPP_RAII_DISPATCHER_TYPE const * m_dispatcher; + }; + + VULKAN_HPP_NODISCARD std::vector + Context::enumerateInstanceExtensionProperties( Optional layerName ) const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkEnumerateInstanceExtensionProperties( + layerName ? 
layerName->c_str() : nullptr, &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkEnumerateInstanceExtensionProperties( + layerName ? layerName->c_str() : nullptr, + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceExtensionProperties" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + Context::enumerateInstanceLayerProperties() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkEnumerateInstanceLayerProperties( &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkEnumerateInstanceLayerProperties( + &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceLayerProperties" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD uint32_t Context::enumerateInstanceVersion() const + { + uint32_t apiVersion; + 
VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkEnumerateInstanceVersion( &apiVersion ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceVersion" ); + } + return apiVersion; + } + + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_, + uint64_t object, + size_t location, + int32_t messageCode, + const std::string & layerPrefix, + const std::string & message ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkDebugReportMessageEXT( static_cast( m_instance ), + static_cast( flags ), + static_cast( objectType_ ), + object, + location, + messageCode, + layerPrefix.c_str(), + message.c_str() ); + } + + VULKAN_HPP_NODISCARD std::vector + Instance::enumeratePhysicalDeviceGroups() const + { + std::vector physicalDeviceGroupProperties; + uint32_t physicalDeviceGroupCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkEnumeratePhysicalDeviceGroups( + static_cast( m_instance ), &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( getDispatcher()->vkEnumeratePhysicalDeviceGroups( + static_cast( m_instance ), + &physicalDeviceGroupCount, + reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + 
{ + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" ); + } + return physicalDeviceGroupProperties; + } + + VULKAN_HPP_NODISCARD PFN_vkVoidFunction Instance::getProcAddr( const std::string & name ) const VULKAN_HPP_NOEXCEPT + { + return getDispatcher()->vkGetInstanceProcAddr( static_cast( m_instance ), name.c_str() ); + } + + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( + VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, + const DebugUtilsMessengerCallbackDataEXT & callbackData ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkSubmitDebugUtilsMessageEXT( + static_cast( m_instance ), + static_cast( messageSeverity ), + static_cast( messageTypes ), + reinterpret_cast( &callbackData ) ); + } + +# ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + VULKAN_HPP_INLINE void PhysicalDevice::acquireXlibDisplayEXT( Display & dpy, + VULKAN_HPP_NAMESPACE::DisplayKHR display ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkAcquireXlibDisplayEXT( + static_cast( m_physicalDevice ), &dpy, static_cast( display ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" ); + } + } +# endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::enumerateDeviceExtensionProperties( Optional layerName ) const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkEnumerateDeviceExtensionProperties( static_cast( m_physicalDevice ), + layerName ? 
layerName->c_str() : nullptr, + &propertyCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkEnumerateDeviceExtensionProperties( + static_cast( m_physicalDevice ), + layerName ? layerName->c_str() : nullptr, + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::enumerateDeviceLayerProperties() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkEnumerateDeviceLayerProperties( + static_cast( m_physicalDevice ), &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkEnumerateDeviceLayerProperties( + static_cast( m_physicalDevice ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); + } + return 
properties; + } + + VULKAN_HPP_NODISCARD std::pair, std::vector> + PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex ) const + { + std::pair, std::vector> data; + std::vector & counters = data.first; + std::vector & counterDescriptions = data.second; + uint32_t counterCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + static_cast( m_physicalDevice ), queueFamilyIndex, &counterCount, nullptr, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && counterCount ) + { + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + result = static_cast( + getDispatcher()->vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + static_cast( m_physicalDevice ), + queueFamilyIndex, + &counterCount, + reinterpret_cast( counters.data() ), + reinterpret_cast( counterDescriptions.data() ) ) ); + VULKAN_HPP_ASSERT( counterCount <= counters.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( counterCount < counters.size() ) ) + { + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( + result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + } + return data; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR + PhysicalDevice::getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo ) const + { + VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR capabilities; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetDisplayPlaneCapabilities2KHR( + static_cast( m_physicalDevice ), + reinterpret_cast( &displayPlaneInfo ), + reinterpret_cast( 
&capabilities ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); + } + return capabilities; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getCalibrateableTimeDomainsEXT() const + { + std::vector timeDomains; + uint32_t timeDomainCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + static_cast( m_physicalDevice ), &timeDomainCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && timeDomainCount ) + { + timeDomains.resize( timeDomainCount ); + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + static_cast( m_physicalDevice ), + &timeDomainCount, + reinterpret_cast( timeDomains.data() ) ) ); + VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( timeDomainCount < timeDomains.size() ) ) + { + timeDomains.resize( timeDomainCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); + } + return timeDomains; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getCooperativeMatrixPropertiesNV() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( + static_cast( m_physicalDevice ), &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( + getDispatcher()->vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( + static_cast( 
m_physicalDevice ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" ); + } + return properties; + } + +# ifdef VK_USE_PLATFORM_DIRECTFB_EXT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 + PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, + IDirectFB & dfb ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( + getDispatcher()->vkGetPhysicalDeviceDirectFBPresentationSupportEXT( + static_cast( m_physicalDevice ), queueFamilyIndex, &dfb ) ); + } +# endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getDisplayPlaneProperties2KHR() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + static_cast( m_physicalDevice ), &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + static_cast( m_physicalDevice ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getDisplayPlanePropertiesKHR() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + static_cast( m_physicalDevice ), &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + static_cast( m_physicalDevice ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getDisplayProperties2KHR() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayProperties2KHR( + static_cast( m_physicalDevice ), &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayProperties2KHR( + static_cast( m_physicalDevice ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( 
propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getDisplayPropertiesKHR() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayPropertiesKHR( + static_cast( m_physicalDevice ), &propertyCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkGetPhysicalDeviceDisplayPropertiesKHR( + static_cast( m_physicalDevice ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferProperties( + const PhysicalDeviceExternalBufferInfo & externalBufferInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalBufferProperties externalBufferProperties; + getDispatcher()->vkGetPhysicalDeviceExternalBufferProperties( + static_cast( m_physicalDevice ), + reinterpret_cast( &externalBufferInfo ), 
+ reinterpret_cast( &externalBufferProperties ) ); + return externalBufferProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFenceProperties( + const PhysicalDeviceExternalFenceInfo & externalFenceInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalFenceProperties externalFenceProperties; + getDispatcher()->vkGetPhysicalDeviceExternalFenceProperties( + static_cast( m_physicalDevice ), + reinterpret_cast( &externalFenceInfo ), + reinterpret_cast( &externalFenceProperties ) ); + return externalFenceProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV + PhysicalDevice::getExternalImageFormatPropertiesNV( + VULKAN_HPP_NAMESPACE::Format format, + VULKAN_HPP_NAMESPACE::ImageType type, + VULKAN_HPP_NAMESPACE::ImageTiling tiling, + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType ) const + { + VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV externalImageFormatProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + static_cast( m_physicalDevice ), + static_cast( format ), + static_cast( type ), + static_cast( tiling ), + static_cast( usage ), + static_cast( flags ), + static_cast( externalHandleType ), + reinterpret_cast( &externalImageFormatProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" ); + } + return externalImageFormatProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties + PhysicalDevice::getExternalSemaphoreProperties( + const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo ) const VULKAN_HPP_NOEXCEPT + { + 
VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties externalSemaphoreProperties; + getDispatcher()->vkGetPhysicalDeviceExternalSemaphoreProperties( + static_cast( m_physicalDevice ), + reinterpret_cast( &externalSemaphoreInfo ), + reinterpret_cast( &externalSemaphoreProperties ) ); + return externalSemaphoreProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures + PhysicalDevice::getFeatures() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures features; + getDispatcher()->vkGetPhysicalDeviceFeatures( static_cast( m_physicalDevice ), + reinterpret_cast( &features ) ); + return features; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 + PhysicalDevice::getFeatures2() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 features; + getDispatcher()->vkGetPhysicalDeviceFeatures2( static_cast( m_physicalDevice ), + reinterpret_cast( &features ) ); + return features; + } + + template + VULKAN_HPP_NODISCARD StructureChain PhysicalDevice::getFeatures2() const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 & features = + structureChain.template get(); + getDispatcher()->vkGetPhysicalDeviceFeatures2( static_cast( m_physicalDevice ), + reinterpret_cast( &features ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties + PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::FormatProperties formatProperties; + getDispatcher()->vkGetPhysicalDeviceFormatProperties( + static_cast( m_physicalDevice ), + static_cast( format ), + reinterpret_cast( &formatProperties ) ); + return formatProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties2 + PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format ) const VULKAN_HPP_NOEXCEPT + { + 
VULKAN_HPP_NAMESPACE::FormatProperties2 formatProperties; + getDispatcher()->vkGetPhysicalDeviceFormatProperties2( + static_cast( m_physicalDevice ), + static_cast( format ), + reinterpret_cast( &formatProperties ) ); + return formatProperties; + } + + template + VULKAN_HPP_NODISCARD StructureChain + PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::FormatProperties2 & formatProperties = + structureChain.template get(); + getDispatcher()->vkGetPhysicalDeviceFormatProperties2( + static_cast( m_physicalDevice ), + static_cast( format ), + reinterpret_cast( &formatProperties ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getFragmentShadingRatesKHR() const + { + std::vector fragmentShadingRates; + uint32_t fragmentShadingRateCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPhysicalDeviceFragmentShadingRatesKHR( + static_cast( m_physicalDevice ), &fragmentShadingRateCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && fragmentShadingRateCount ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceFragmentShadingRatesKHR( + static_cast( m_physicalDevice ), + &fragmentShadingRateCount, + reinterpret_cast( fragmentShadingRates.data() ) ) ); + VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( fragmentShadingRateCount < fragmentShadingRates.size() ) ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" ); + } + return 
fragmentShadingRates; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageFormatProperties + PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, + VULKAN_HPP_NAMESPACE::ImageType type, + VULKAN_HPP_NAMESPACE::ImageTiling tiling, + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags ) const + { + VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceImageFormatProperties( + static_cast( m_physicalDevice ), + static_cast( format ), + static_cast( type ), + static_cast( tiling ), + static_cast( usage ), + static_cast( flags ), + reinterpret_cast( &imageFormatProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); + } + return imageFormatProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageFormatProperties2 + PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo ) const + { + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 imageFormatProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceImageFormatProperties2( + static_cast( m_physicalDevice ), + reinterpret_cast( &imageFormatInfo ), + reinterpret_cast( &imageFormatProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" ); + } + return imageFormatProperties; + } + + template + VULKAN_HPP_NODISCARD StructureChain + PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 & imageFormatProperties = + structureChain.template get(); + 
VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceImageFormatProperties2( + static_cast( m_physicalDevice ), + reinterpret_cast( &imageFormatInfo ), + reinterpret_cast( &imageFormatProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" ); + } + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties + PhysicalDevice::getMemoryProperties() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties memoryProperties; + getDispatcher()->vkGetPhysicalDeviceMemoryProperties( + static_cast( m_physicalDevice ), + reinterpret_cast( &memoryProperties ) ); + return memoryProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 + PhysicalDevice::getMemoryProperties2() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 memoryProperties; + getDispatcher()->vkGetPhysicalDeviceMemoryProperties2( + static_cast( m_physicalDevice ), + reinterpret_cast( &memoryProperties ) ); + return memoryProperties; + } + + template + VULKAN_HPP_NODISCARD StructureChain PhysicalDevice::getMemoryProperties2() const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 & memoryProperties = + structureChain.template get(); + getDispatcher()->vkGetPhysicalDeviceMemoryProperties2( + static_cast( m_physicalDevice ), + reinterpret_cast( &memoryProperties ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT PhysicalDevice::getMultisamplePropertiesEXT( + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT multisampleProperties; + getDispatcher()->vkGetPhysicalDeviceMultisamplePropertiesEXT( + static_cast( 
m_physicalDevice ), + static_cast( samples ), + reinterpret_cast( &multisampleProperties ) ); + return multisampleProperties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + std::vector rects; + uint32_t rectCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetPhysicalDevicePresentRectanglesKHR( static_cast( m_physicalDevice ), + static_cast( surface ), + &rectCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && rectCount ) + { + rects.resize( rectCount ); + result = static_cast( + getDispatcher()->vkGetPhysicalDevicePresentRectanglesKHR( static_cast( m_physicalDevice ), + static_cast( surface ), + &rectCount, + reinterpret_cast( rects.data() ) ) ); + VULKAN_HPP_ASSERT( rectCount <= rects.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( rectCount < rects.size() ) ) + { + rects.resize( rectCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" ); + } + return rects; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties + PhysicalDevice::getProperties() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties; + getDispatcher()->vkGetPhysicalDeviceProperties( static_cast( m_physicalDevice ), + reinterpret_cast( &properties ) ); + return properties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 + PhysicalDevice::getProperties2() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties; + getDispatcher()->vkGetPhysicalDeviceProperties2( static_cast( m_physicalDevice ), + reinterpret_cast( &properties ) ); + return properties; + } + + template + VULKAN_HPP_NODISCARD 
StructureChain PhysicalDevice::getProperties2() const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 & properties = + structureChain.template get(); + getDispatcher()->vkGetPhysicalDeviceProperties2( static_cast( m_physicalDevice ), + reinterpret_cast( &properties ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD uint32_t PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( + const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo ) const VULKAN_HPP_NOEXCEPT + { + uint32_t numPasses; + getDispatcher()->vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + static_cast( m_physicalDevice ), + reinterpret_cast( &performanceQueryCreateInfo ), + &numPasses ); + return numPasses; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getQueueFamilyProperties() const VULKAN_HPP_NOEXCEPT + { + uint32_t queueFamilyPropertyCount; + getDispatcher()->vkGetPhysicalDeviceQueueFamilyProperties( + static_cast( m_physicalDevice ), &queueFamilyPropertyCount, nullptr ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + getDispatcher()->vkGetPhysicalDeviceQueueFamilyProperties( + static_cast( m_physicalDevice ), + &queueFamilyPropertyCount, + reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getQueueFamilyProperties2() const VULKAN_HPP_NOEXCEPT + { + uint32_t queueFamilyPropertyCount; + getDispatcher()->vkGetPhysicalDeviceQueueFamilyProperties2( + static_cast( m_physicalDevice ), &queueFamilyPropertyCount, nullptr ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + getDispatcher()->vkGetPhysicalDeviceQueueFamilyProperties2( + static_cast( m_physicalDevice ), + &queueFamilyPropertyCount, + reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= 
queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + template + VULKAN_HPP_NODISCARD std::vector PhysicalDevice::getQueueFamilyProperties2() const + { + uint32_t queueFamilyPropertyCount; + getDispatcher()->vkGetPhysicalDeviceQueueFamilyProperties2( + static_cast( m_physicalDevice ), &queueFamilyPropertyCount, nullptr ); + std::vector returnVector( queueFamilyPropertyCount ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + queueFamilyProperties[i].pNext = + returnVector[i].template get().pNext; + } + getDispatcher()->vkGetPhysicalDeviceQueueFamilyProperties2( + static_cast( m_physicalDevice ), + &queueFamilyPropertyCount, + reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + returnVector[i].template get() = queueFamilyProperties[i]; + } + return returnVector; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, + VULKAN_HPP_NAMESPACE::ImageType type, + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, + VULKAN_HPP_NAMESPACE::ImageTiling tiling ) const + VULKAN_HPP_NOEXCEPT + { + uint32_t propertyCount; + getDispatcher()->vkGetPhysicalDeviceSparseImageFormatProperties( + static_cast( m_physicalDevice ), + static_cast( format ), + static_cast( type ), + static_cast( samples ), + static_cast( usage ), + static_cast( tiling ), + &propertyCount, + nullptr ); + std::vector properties( propertyCount ); + getDispatcher()->vkGetPhysicalDeviceSparseImageFormatProperties( + static_cast( m_physicalDevice ), + static_cast( format ), + static_cast( type ), + static_cast( samples ), + static_cast( usage ), + static_cast( tiling ), + &propertyCount, + reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( 
propertyCount <= properties.size() ); + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo ) const + VULKAN_HPP_NOEXCEPT + { + uint32_t propertyCount; + getDispatcher()->vkGetPhysicalDeviceSparseImageFormatProperties2( + static_cast( m_physicalDevice ), + reinterpret_cast( &formatInfo ), + &propertyCount, + nullptr ); + std::vector properties( propertyCount ); + getDispatcher()->vkGetPhysicalDeviceSparseImageFormatProperties2( + static_cast( m_physicalDevice ), + reinterpret_cast( &formatInfo ), + &propertyCount, + reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV() const + { + std::vector combinations; + uint32_t combinationCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + static_cast( m_physicalDevice ), &combinationCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && combinationCount ) + { + combinations.resize( combinationCount ); + result = static_cast( + getDispatcher()->vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + static_cast( m_physicalDevice ), + &combinationCount, + reinterpret_cast( combinations.data() ) ) ); + VULKAN_HPP_ASSERT( combinationCount <= combinations.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( combinationCount < combinations.size() ) ) + { + combinations.resize( combinationCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( + result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" ); + } + 
return combinations; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT + PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT surfaceCapabilities; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilities2EXT( + static_cast( m_physicalDevice ), + static_cast( surface ), + reinterpret_cast( &surfaceCapabilities ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" ); + } + return surfaceCapabilities; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR + PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const + { + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR surfaceCapabilities; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilities2KHR( + static_cast( m_physicalDevice ), + reinterpret_cast( &surfaceInfo ), + reinterpret_cast( &surfaceCapabilities ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" ); + } + return surfaceCapabilities; + } + + template + VULKAN_HPP_NODISCARD StructureChain + PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR & surfaceCapabilities = + structureChain.template get(); + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilities2KHR( + static_cast( m_physicalDevice ), + reinterpret_cast( &surfaceInfo ), + reinterpret_cast( &surfaceCapabilities ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + 
throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" ); + } + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR + PhysicalDevice::getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR surfaceCapabilities; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + static_cast( m_physicalDevice ), + static_cast( surface ), + reinterpret_cast( &surfaceCapabilities ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" ); + } + return surfaceCapabilities; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const + { + std::vector surfaceFormats; + uint32_t surfaceFormatCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceFormats2KHR( + static_cast( m_physicalDevice ), + reinterpret_cast( &surfaceInfo ), + &surfaceFormatCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceFormats2KHR( + static_cast( m_physicalDevice ), + reinterpret_cast( &surfaceInfo ), + &surfaceFormatCount, + reinterpret_cast( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) + { + surfaceFormats.resize( surfaceFormatCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( 
result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" ); + } + return surfaceFormats; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + std::vector surfaceFormats; + uint32_t surfaceFormatCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetPhysicalDeviceSurfaceFormatsKHR( static_cast( m_physicalDevice ), + static_cast( surface ), + &surfaceFormatCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( getDispatcher()->vkGetPhysicalDeviceSurfaceFormatsKHR( + static_cast( m_physicalDevice ), + static_cast( surface ), + &surfaceFormatCount, + reinterpret_cast( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) + { + surfaceFormats.resize( surfaceFormatCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" ); + } + return surfaceFormats; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const + { + std::vector presentModes; + uint32_t presentModeCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPhysicalDeviceSurfacePresentModes2EXT( + static_cast( m_physicalDevice ), + reinterpret_cast( &surfaceInfo ), + &presentModeCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = 
+ static_cast( getDispatcher()->vkGetPhysicalDeviceSurfacePresentModes2EXT( + static_cast( m_physicalDevice ), + reinterpret_cast( &surfaceInfo ), + &presentModeCount, + reinterpret_cast( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) + { + presentModes.resize( presentModeCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" ); + } + return presentModes; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + std::vector presentModes; + uint32_t presentModeCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetPhysicalDeviceSurfacePresentModesKHR( static_cast( m_physicalDevice ), + static_cast( surface ), + &presentModeCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = + static_cast( getDispatcher()->vkGetPhysicalDeviceSurfacePresentModesKHR( + static_cast( m_physicalDevice ), + static_cast( surface ), + &presentModeCount, + reinterpret_cast( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) + { + presentModes.resize( presentModeCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" ); + } + return 
presentModes; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 + PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + VULKAN_HPP_NAMESPACE::Bool32 supported; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetPhysicalDeviceSurfaceSupportKHR( static_cast( m_physicalDevice ), + queueFamilyIndex, + static_cast( surface ), + reinterpret_cast( &supported ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" ); + } + return supported; + } + + VULKAN_HPP_NODISCARD std::vector + PhysicalDevice::getToolPropertiesEXT() const + { + std::vector toolProperties; + uint32_t toolCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPhysicalDeviceToolPropertiesEXT( + static_cast( m_physicalDevice ), &toolCount, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && toolCount ) + { + toolProperties.resize( toolCount ); + result = static_cast( getDispatcher()->vkGetPhysicalDeviceToolPropertiesEXT( + static_cast( m_physicalDevice ), + &toolCount, + reinterpret_cast( toolProperties.data() ) ) ); + VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( toolCount < toolProperties.size() ) ) + { + toolProperties.resize( toolCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" ); + } + return toolProperties; + } + +# ifdef VK_USE_PLATFORM_WAYLAND_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 + PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, + struct wl_display & display ) const VULKAN_HPP_NOEXCEPT 
+ { + return static_cast( + getDispatcher()->vkGetPhysicalDeviceWaylandPresentationSupportKHR( + static_cast( m_physicalDevice ), queueFamilyIndex, &display ) ); + } +# endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 + PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( getDispatcher()->vkGetPhysicalDeviceWin32PresentationSupportKHR( + static_cast( m_physicalDevice ), queueFamilyIndex ) ); + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +# ifdef VK_USE_PLATFORM_XCB_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 PhysicalDevice::getXcbPresentationSupportKHR( + uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( getDispatcher()->vkGetPhysicalDeviceXcbPresentationSupportKHR( + static_cast( m_physicalDevice ), queueFamilyIndex, &connection, visual_id ) ); + } +# endif /*VK_USE_PLATFORM_XCB_KHR*/ + +# ifdef VK_USE_PLATFORM_XLIB_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 PhysicalDevice::getXlibPresentationSupportKHR( + uint32_t queueFamilyIndex, Display & dpy, VisualID visualID ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( getDispatcher()->vkGetPhysicalDeviceXlibPresentationSupportKHR( + static_cast( m_physicalDevice ), queueFamilyIndex, &dpy, visualID ) ); + } +# endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + VULKAN_HPP_NODISCARD std::pair + Device::acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo ) const + { + uint32_t imageIndex; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkAcquireNextImage2KHR( static_cast( m_device ), + reinterpret_cast( &acquireInfo ), + &imageIndex ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eTimeout ) && + ( result != 
VULKAN_HPP_NAMESPACE::Result::eNotReady ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR" ); + } + return std::make_pair( result, imageIndex ); + } + + VULKAN_HPP_INLINE void Device::acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkAcquireProfilingLockKHR( + static_cast( m_device ), reinterpret_cast( &info ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" ); + } + } + + VULKAN_HPP_INLINE void Device::bindAccelerationStructureMemoryNV( + ArrayProxy const & bindInfos ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkBindAccelerationStructureMemoryNV( + static_cast( m_device ), + bindInfos.size(), + reinterpret_cast( bindInfos.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" ); + } + } + + VULKAN_HPP_INLINE void + Device::bindBufferMemory2( ArrayProxy const & bindInfos ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkBindBufferMemory2( static_cast( m_device ), + bindInfos.size(), + reinterpret_cast( bindInfos.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" ); + } + } + + VULKAN_HPP_INLINE void + Device::bindImageMemory2( ArrayProxy const & bindInfos ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkBindImageMemory2( static_cast( m_device ), + bindInfos.size(), + reinterpret_cast( bindInfos.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( 
result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Device::buildAccelerationStructuresKHR( + VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + ArrayProxy const & infos, + ArrayProxy const & pBuildRangeInfos ) + const + { + if ( infos.size() != pBuildRangeInfos.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::Device::buildAccelerationStructuresKHR: infos.size() != pBuildRangeInfos.size()" ); + } + + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkBuildAccelerationStructuresKHR( + static_cast( m_device ), + static_cast( deferredOperation ), + infos.size(), + reinterpret_cast( infos.data() ), + reinterpret_cast( pBuildRangeInfos.data() ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR" ); + } + return result; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + Device::copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + const CopyAccelerationStructureInfoKHR & info ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCopyAccelerationStructureKHR( + static_cast( m_device ), + static_cast( deferredOperation ), + reinterpret_cast( &info ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureKHR" ); + } + return result; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + 
Device::copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + const CopyAccelerationStructureToMemoryInfoKHR & info ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCopyAccelerationStructureToMemoryKHR( + static_cast( m_device ), + static_cast( deferredOperation ), + reinterpret_cast( &info ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureToMemoryKHR" ); + } + return result; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + Device::copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, + const CopyMemoryToAccelerationStructureInfoKHR & info ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCopyMemoryToAccelerationStructureKHR( + static_cast( m_device ), + static_cast( deferredOperation ), + reinterpret_cast( &info ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToAccelerationStructureKHR" ); + } + return result; + } + + VULKAN_HPP_INLINE void Device::debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkDebugMarkerSetObjectNameEXT( + static_cast( m_device ), reinterpret_cast( &nameInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" ); + } + } + 
+ VULKAN_HPP_INLINE void Device::debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkDebugMarkerSetObjectTagEXT( + static_cast( m_device ), reinterpret_cast( &tagInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" ); + } + } + + VULKAN_HPP_INLINE void Device::waitIdle() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkDeviceWaitIdle( static_cast( m_device ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); + } + } + + VULKAN_HPP_INLINE void Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, + const DisplayPowerInfoEXT & displayPowerInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkDisplayPowerControlEXT( + static_cast( m_device ), + static_cast( display ), + reinterpret_cast( &displayPowerInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" ); + } + } + + VULKAN_HPP_INLINE void Device::flushMappedMemoryRanges( + ArrayProxy const & memoryRanges ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkFlushMappedMemoryRanges( + static_cast( m_device ), + memoryRanges.size(), + reinterpret_cast( memoryRanges.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR + Device::getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, + const 
AccelerationStructureBuildGeometryInfoKHR & buildInfo, + ArrayProxy const & maxPrimitiveCounts ) const + VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR sizeInfo; + getDispatcher()->vkGetAccelerationStructureBuildSizesKHR( + static_cast( m_device ), + static_cast( buildType ), + reinterpret_cast( &buildInfo ), + maxPrimitiveCounts.data(), + reinterpret_cast( &sizeInfo ) ); + return sizeInfo; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceAddress + Device::getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info ) const + VULKAN_HPP_NOEXCEPT + { + return static_cast( + getDispatcher()->vkGetAccelerationStructureDeviceAddressKHR( + static_cast( m_device ), + reinterpret_cast( &info ) ) ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR + Device::getAccelerationStructureMemoryRequirementsNV( + const AccelerationStructureMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR memoryRequirements; + getDispatcher()->vkGetAccelerationStructureMemoryRequirementsNV( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD StructureChain Device::getAccelerationStructureMemoryRequirementsNV( + const AccelerationStructureMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR & memoryRequirements = + structureChain.template get(); + getDispatcher()->vkGetAccelerationStructureMemoryRequirementsNV( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } + +# ifdef VK_USE_PLATFORM_ANDROID_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID + Device::getAndroidHardwareBufferPropertiesANDROID( 
const struct AHardwareBuffer & buffer ) const + { + VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID properties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetAndroidHardwareBufferPropertiesANDROID( + static_cast( m_device ), + &buffer, + reinterpret_cast( &properties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); + } + return properties; + } +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +# ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD StructureChain + Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID & properties = + structureChain.template get(); + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetAndroidHardwareBufferPropertiesANDROID( + static_cast( m_device ), + &buffer, + reinterpret_cast( &properties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); + } + return structureChain; + } +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceAddress + Device::getBufferAddress( const BufferDeviceAddressInfo & info ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( getDispatcher()->vkGetBufferDeviceAddress( + static_cast( m_device ), reinterpret_cast( &info ) ) ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 + Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + getDispatcher()->vkGetBufferMemoryRequirements2( + static_cast( m_device ), + 
reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD StructureChain + Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = + structureChain.template get(); + getDispatcher()->vkGetBufferMemoryRequirements2( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD uint64_t + Device::getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info ) const VULKAN_HPP_NOEXCEPT + { + return getDispatcher()->vkGetBufferOpaqueCaptureAddress( + static_cast( m_device ), reinterpret_cast( &info ) ); + } + + VULKAN_HPP_NODISCARD std::pair, uint64_t> Device::getCalibratedTimestampsEXT( + ArrayProxy const & timestampInfos ) const + { + std::pair, uint64_t> data( + std::piecewise_construct, std::forward_as_tuple( timestampInfos.size() ), std::forward_as_tuple( 0 ) ); + std::vector & timestamps = data.first; + uint64_t & maxDeviation = data.second; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetCalibratedTimestampsEXT( + static_cast( m_device ), + timestampInfos.size(), + reinterpret_cast( timestampInfos.data() ), + timestamps.data(), + &maxDeviation ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); + } + return data; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupport( + const DescriptorSetLayoutCreateInfo & createInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport support; + getDispatcher()->vkGetDescriptorSetLayoutSupport( + static_cast( m_device ), + reinterpret_cast( &createInfo ), + 
reinterpret_cast( &support ) ); + return support; + } + + template + VULKAN_HPP_NODISCARD StructureChain Device::getDescriptorSetLayoutSupport( + const DescriptorSetLayoutCreateInfo & createInfo ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport & support = + structureChain.template get(); + getDispatcher()->vkGetDescriptorSetLayoutSupport( + static_cast( m_device ), + reinterpret_cast( &createInfo ), + reinterpret_cast( &support ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR + Device::getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionInfoKHR & versionInfo ) const + VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR compatibility; + getDispatcher()->vkGetDeviceAccelerationStructureCompatibilityKHR( + static_cast( m_device ), + reinterpret_cast( &versionInfo ), + reinterpret_cast( &compatibility ) ); + return compatibility; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeatures( + uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags peerMemoryFeatures; + getDispatcher()->vkGetDeviceGroupPeerMemoryFeatures( + static_cast( m_device ), + heapIndex, + localDeviceIndex, + remoteDeviceIndex, + reinterpret_cast( &peerMemoryFeatures ) ); + return peerMemoryFeatures; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR + Device::getGroupPresentCapabilitiesKHR() const + { + VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR deviceGroupPresentCapabilities; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetDeviceGroupPresentCapabilitiesKHR( + static_cast( m_device ), + reinterpret_cast( &deviceGroupPresentCapabilities ) ) ); + if ( result != 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupPresentCapabilitiesKHR" ); + } + return deviceGroupPresentCapabilities; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR + Device::getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const + { + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetDeviceGroupSurfacePresentModes2EXT( + static_cast( m_device ), + reinterpret_cast( &surfaceInfo ), + reinterpret_cast( &modes ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" ); + } + return modes; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR + Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface ) const + { + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetDeviceGroupSurfacePresentModesKHR( + static_cast( m_device ), + static_cast( surface ), + reinterpret_cast( &modes ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" ); + } + return modes; + } + + VULKAN_HPP_NODISCARD uint64_t Device::getMemoryOpaqueCaptureAddress( + const DeviceMemoryOpaqueCaptureAddressInfo & info ) const VULKAN_HPP_NOEXCEPT + { + return getDispatcher()->vkGetDeviceMemoryOpaqueCaptureAddress( + static_cast( m_device ), reinterpret_cast( &info ) ); + } + + VULKAN_HPP_NODISCARD PFN_vkVoidFunction Device::getProcAddr( const std::string & name ) const VULKAN_HPP_NOEXCEPT + { + return getDispatcher()->vkGetDeviceProcAddr( 
static_cast( m_device ), name.c_str() ); + } + + VULKAN_HPP_NODISCARD int Device::getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo ) const + { + int fd; + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetFenceFdKHR( + static_cast( m_device ), reinterpret_cast( &getFdInfo ), &fd ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" ); + } + return fd; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + Device::getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo ) const + { + HANDLE handle; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetFenceWin32HandleKHR( + static_cast( m_device ), + reinterpret_cast( &getWin32HandleInfo ), + &handle ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" ); + } + return handle; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getGeneratedCommandsMemoryRequirementsNV( + const GeneratedCommandsMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + getDispatcher()->vkGetGeneratedCommandsMemoryRequirementsNV( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD StructureChain Device::getGeneratedCommandsMemoryRequirementsNV( + const GeneratedCommandsMemoryRequirementsInfoNV & info ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = + structureChain.template get(); + getDispatcher()->vkGetGeneratedCommandsMemoryRequirementsNV( + static_cast( m_device ), + reinterpret_cast( &info ), + 
reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 + Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + getDispatcher()->vkGetImageMemoryRequirements2( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD StructureChain + Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = + structureChain.template get(); + getDispatcher()->vkGetImageMemoryRequirements2( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } + + VULKAN_HPP_NODISCARD std::vector + Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info ) const + VULKAN_HPP_NOEXCEPT + { + uint32_t sparseMemoryRequirementCount; + getDispatcher()->vkGetImageSparseMemoryRequirements2( + static_cast( m_device ), + reinterpret_cast( &info ), + &sparseMemoryRequirementCount, + nullptr ); + std::vector sparseMemoryRequirements( + sparseMemoryRequirementCount ); + getDispatcher()->vkGetImageSparseMemoryRequirements2( + static_cast( m_device ), + reinterpret_cast( &info ), + &sparseMemoryRequirementCount, + reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } + + VULKAN_HPP_NODISCARD uint32_t + Device::getImageViewHandleNVX( const ImageViewHandleInfoNVX & info ) const VULKAN_HPP_NOEXCEPT + { + return getDispatcher()->vkGetImageViewHandleNVX( static_cast( m_device ), + reinterpret_cast( &info ) ); + } + +# ifdef 
VK_USE_PLATFORM_ANDROID_KHR + VULKAN_HPP_NODISCARD struct AHardwareBuffer * + Device::getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info ) const + { + struct AHardwareBuffer * buffer; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetMemoryAndroidHardwareBufferANDROID( + static_cast( m_device ), + reinterpret_cast( &info ), + &buffer ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" ); + } + return buffer; + } +# endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VULKAN_HPP_NODISCARD int Device::getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo ) const + { + int fd; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetMemoryFdKHR( + static_cast( m_device ), reinterpret_cast( &getFdInfo ), &fd ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" ); + } + return fd; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR + Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, + int fd ) const + { + VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR memoryFdProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetMemoryFdPropertiesKHR( + static_cast( m_device ), + static_cast( handleType ), + fd, + reinterpret_cast( &memoryFdProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" ); + } + return memoryFdProperties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT + Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, + const void * pHostPointer ) const + { + 
VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT memoryHostPointerProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetMemoryHostPointerPropertiesEXT( + static_cast( m_device ), + static_cast( handleType ), + pHostPointer, + reinterpret_cast( &memoryHostPointerProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" ); + } + return memoryHostPointerProperties; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + Device::getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo ) const + { + HANDLE handle; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetMemoryWin32HandleKHR( + static_cast( m_device ), + reinterpret_cast( &getWin32HandleInfo ), + &handle ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" ); + } + return handle; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR + Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle ) const + { + VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR memoryWin32HandleProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetMemoryWin32HandlePropertiesKHR( + static_cast( m_device ), + static_cast( handleType ), + handle, + reinterpret_cast( &memoryWin32HandleProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" ); + } + return memoryWin32HandleProperties; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD 
VULKAN_HPP_NAMESPACE::PerformanceValueINTEL + Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter ) const + { + VULKAN_HPP_NAMESPACE::PerformanceValueINTEL value; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetPerformanceParameterINTEL( static_cast( m_device ), + static_cast( parameter ), + reinterpret_cast( &value ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" ); + } + return value; + } + + VULKAN_HPP_NODISCARD std::vector + Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo ) const + { + std::vector internalRepresentations; + uint32_t internalRepresentationCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = + static_cast( getDispatcher()->vkGetPipelineExecutableInternalRepresentationsKHR( + static_cast( m_device ), + reinterpret_cast( &executableInfo ), + &internalRepresentationCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && internalRepresentationCount ) + { + internalRepresentations.resize( internalRepresentationCount ); + result = static_cast( + getDispatcher()->vkGetPipelineExecutableInternalRepresentationsKHR( + static_cast( m_device ), + reinterpret_cast( &executableInfo ), + &internalRepresentationCount, + reinterpret_cast( internalRepresentations.data() ) ) ); + VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( internalRepresentationCount < internalRepresentations.size() ) ) + { + internalRepresentations.resize( internalRepresentationCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING 
"::Device::getPipelineExecutableInternalRepresentationsKHR" ); + } + return internalRepresentations; + } + + VULKAN_HPP_NODISCARD std::vector + Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo ) const + { + std::vector properties; + uint32_t executableCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPipelineExecutablePropertiesKHR( + static_cast( m_device ), + reinterpret_cast( &pipelineInfo ), + &executableCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && executableCount ) + { + properties.resize( executableCount ); + result = static_cast( getDispatcher()->vkGetPipelineExecutablePropertiesKHR( + static_cast( m_device ), + reinterpret_cast( &pipelineInfo ), + &executableCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( executableCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( executableCount < properties.size() ) ) + { + properties.resize( executableCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo ) const + { + std::vector statistics; + uint32_t statisticCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPipelineExecutableStatisticsKHR( + static_cast( m_device ), + reinterpret_cast( &executableInfo ), + &statisticCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && statisticCount ) + { + statistics.resize( statisticCount ); + result = static_cast( getDispatcher()->vkGetPipelineExecutableStatisticsKHR( + static_cast( m_device ), + 
reinterpret_cast( &executableInfo ), + &statisticCount, + reinterpret_cast( statistics.data() ) ) ); + VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( statisticCount < statistics.size() ) ) + { + statistics.resize( statisticCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" ); + } + return statistics; + } + + VULKAN_HPP_NODISCARD uint64_t + Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType_, + uint64_t objectHandle, + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot ) const VULKAN_HPP_NOEXCEPT + { + uint64_t data; + getDispatcher()->vkGetPrivateDataEXT( static_cast( m_device ), + static_cast( objectType_ ), + objectHandle, + static_cast( privateDataSlot ), + &data ); + return data; + } + + VULKAN_HPP_NODISCARD int Device::getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo ) const + { + int fd; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetSemaphoreFdKHR( + static_cast( m_device ), reinterpret_cast( &getFdInfo ), &fd ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" ); + } + return fd; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + Device::getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo ) const + { + HANDLE handle; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetSemaphoreWin32HandleKHR( + static_cast( m_device ), + reinterpret_cast( &getWin32HandleInfo ), + &handle ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" ); 
+ } + return handle; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_INLINE void Device::importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkImportFenceFdKHR( + static_cast( m_device ), reinterpret_cast( &importFenceFdInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" ); + } + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE void + Device::importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkImportFenceWin32HandleKHR( + static_cast( m_device ), + reinterpret_cast( &importFenceWin32HandleInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_INLINE void Device::importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkImportSemaphoreFdKHR( + static_cast( m_device ), + reinterpret_cast( &importSemaphoreFdInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" ); + } + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE void Device::importSemaphoreWin32HandleKHR( + const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkImportSemaphoreWin32HandleKHR( + static_cast( m_device ), + reinterpret_cast( &importSemaphoreWin32HandleInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( 
result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreWin32HandleKHR" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_INLINE void + Device::initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkInitializePerformanceApiINTEL( + static_cast( m_device ), + reinterpret_cast( &initializeInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" ); + } + } + + VULKAN_HPP_INLINE void Device::invalidateMappedMemoryRanges( + ArrayProxy const & memoryRanges ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkInvalidateMappedMemoryRanges( + static_cast( m_device ), + memoryRanges.size(), + reinterpret_cast( memoryRanges.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); + } + } + + VULKAN_HPP_INLINE void Device::releaseProfilingLockKHR() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkReleaseProfilingLockKHR( static_cast( m_device ) ); + } + + VULKAN_HPP_INLINE void Device::resetFences( ArrayProxy const & fences ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkResetFences( + static_cast( m_device ), fences.size(), reinterpret_cast( fences.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" ); + } + } + + VULKAN_HPP_INLINE void Device::setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkSetDebugUtilsObjectNameEXT( + static_cast( m_device ), reinterpret_cast( &nameInfo ) ) ); + if ( result != 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" ); + } + } + + VULKAN_HPP_INLINE void Device::setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkSetDebugUtilsObjectTagEXT( + static_cast( m_device ), reinterpret_cast( &tagInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" ); + } + } + + VULKAN_HPP_INLINE void + Device::setHdrMetadataEXT( ArrayProxy const & swapchains, + ArrayProxy const & metadata ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( swapchains.size() == metadata.size() ); +# else + if ( swapchains.size() != metadata.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::Device::setHdrMetadataEXT: swapchains.size() != metadata.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkSetHdrMetadataEXT( static_cast( m_device ), + swapchains.size(), + reinterpret_cast( swapchains.data() ), + reinterpret_cast( metadata.data() ) ); + } + + VULKAN_HPP_INLINE void Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType_, + uint64_t objectHandle, + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, + uint64_t data ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkSetPrivateDataEXT( static_cast( m_device ), + static_cast( objectType_ ), + objectHandle, + static_cast( privateDataSlot ), + data ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" ); + } + } + + VULKAN_HPP_INLINE void Device::signalSemaphore( const SemaphoreSignalInfo & signalInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + 
static_cast( getDispatcher()->vkSignalSemaphore( + static_cast( m_device ), reinterpret_cast( &signalInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" ); + } + } + + VULKAN_HPP_INLINE void Device::uninitializePerformanceApiINTEL() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkUninitializePerformanceApiINTEL( static_cast( m_device ) ); + } + + VULKAN_HPP_INLINE void Device::updateDescriptorSets( + ArrayProxy const & descriptorWrites, + ArrayProxy const & descriptorCopies ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkUpdateDescriptorSets( + static_cast( m_device ), + descriptorWrites.size(), + reinterpret_cast( descriptorWrites.data() ), + descriptorCopies.size(), + reinterpret_cast( descriptorCopies.data() ) ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + Device::waitForFences( ArrayProxy const & fences, + VULKAN_HPP_NAMESPACE::Bool32 waitAll, + uint64_t timeout ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkWaitForFences( static_cast( m_device ), + fences.size(), + reinterpret_cast( fences.data() ), + static_cast( waitAll ), + timeout ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eTimeout ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitForFences" ); + } + return result; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + Device::waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkWaitSemaphores( + static_cast( m_device ), reinterpret_cast( &waitInfo ), timeout ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eTimeout ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING 
"::Device::waitSemaphores" ); + } + return result; + } + + template + VULKAN_HPP_NODISCARD std::vector Device::writeAccelerationStructuresPropertiesKHR( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + size_t dataSize, + size_t stride ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( getDispatcher()->vkWriteAccelerationStructuresPropertiesKHR( + static_cast( m_device ), + accelerationStructures.size(), + reinterpret_cast( accelerationStructures.data() ), + static_cast( queryType ), + data.size() * sizeof( T ), + reinterpret_cast( data.data() ), + stride ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" ); + } + return data; + } + + template + VULKAN_HPP_NODISCARD T Device::writeAccelerationStructuresPropertyKHR( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + size_t stride ) const + { + T data; + Result result = static_cast( getDispatcher()->vkWriteAccelerationStructuresPropertiesKHR( + static_cast( m_device ), + accelerationStructures.size(), + reinterpret_cast( accelerationStructures.data() ), + static_cast( queryType ), + sizeof( T ), + reinterpret_cast( &data ), + stride ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" ); + } + return data; + } + + template + VULKAN_HPP_NODISCARD std::vector AccelerationStructureNV::getHandle( size_t dataSize ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( getDispatcher()->vkGetAccelerationStructureHandleNV( + static_cast( m_device ), + static_cast( m_accelerationStructureNV ), + data.size() * sizeof( T ), 
+ reinterpret_cast( data.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureNV::getHandle" ); + } + return data; + } + + template + VULKAN_HPP_NODISCARD T AccelerationStructureNV::getHandle() const + { + T data; + Result result = static_cast( getDispatcher()->vkGetAccelerationStructureHandleNV( + static_cast( m_device ), + static_cast( m_accelerationStructureNV ), + sizeof( T ), + reinterpret_cast( &data ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureNV::getHandle" ); + } + return data; + } + + VULKAN_HPP_INLINE void Buffer::bindMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkBindBufferMemory( static_cast( m_device ), + static_cast( m_buffer ), + static_cast( memory ), + static_cast( memoryOffset ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Buffer::bindMemory" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements + Buffer::getMemoryRequirements() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements; + getDispatcher()->vkGetBufferMemoryRequirements( static_cast( m_device ), + static_cast( m_buffer ), + reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + VULKAN_HPP_INLINE void CommandPool::reset( VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkResetCommandPool( static_cast( m_device ), + static_cast( m_commandPool ), + static_cast( flags ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, 
VULKAN_HPP_NAMESPACE_STRING "::CommandPool::reset" ); + } + } + + VULKAN_HPP_INLINE void + CommandPool::trim( VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkTrimCommandPool( static_cast( m_device ), + static_cast( m_commandPool ), + static_cast( flags ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::begin( const CommandBufferBeginInfo & beginInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkBeginCommandBuffer( static_cast( m_commandBuffer ), + reinterpret_cast( &beginInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" ); + } + } + + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( + const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBeginConditionalRenderingEXT( + static_cast( m_commandBuffer ), + reinterpret_cast( &conditionalRenderingBegin ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBeginDebugUtilsLabelEXT( static_cast( m_commandBuffer ), + reinterpret_cast( &labelInfo ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query, + VULKAN_HPP_NAMESPACE::QueryControlFlags flags ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBeginQuery( static_cast( m_commandBuffer ), + static_cast( queryPool ), + query, + static_cast( flags ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query, + VULKAN_HPP_NAMESPACE::QueryControlFlags flags, + uint32_t index ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBeginQueryIndexedEXT( static_cast( m_commandBuffer ), + static_cast( queryPool ), + query, + 
static_cast( flags ), + index ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::beginRenderPass( const RenderPassBeginInfo & renderPassBegin, + VULKAN_HPP_NAMESPACE::SubpassContents contents ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBeginRenderPass( static_cast( m_commandBuffer ), + reinterpret_cast( &renderPassBegin ), + static_cast( contents ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, + const SubpassBeginInfo & subpassBeginInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBeginRenderPass2( static_cast( m_commandBuffer ), + reinterpret_cast( &renderPassBegin ), + reinterpret_cast( &subpassBeginInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( + uint32_t firstCounterBuffer, + ArrayProxy const & counterBuffers, + ArrayProxy const & counterBufferOffsets ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( counterBufferOffsets.empty() || counterBuffers.size() == counterBufferOffsets.size() ); +# else + if ( !counterBufferOffsets.empty() && counterBuffers.size() != counterBufferOffsets.size() ) + { + throw LogicError( + VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::beginTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdBeginTransformFeedbackEXT( + static_cast( m_commandBuffer ), + firstCounterBuffer, + counterBuffers.size(), + reinterpret_cast( counterBuffers.data() ), + reinterpret_cast( counterBufferOffsets.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t firstSet, + ArrayProxy const & descriptorSets, + ArrayProxy const & dynamicOffsets ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBindDescriptorSets( static_cast( m_commandBuffer ), 
+ static_cast( pipelineBindPoint ), + static_cast( layout ), + firstSet, + descriptorSets.size(), + reinterpret_cast( descriptorSets.data() ), + dynamicOffsets.size(), + dynamicOffsets.data() ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::IndexType indexType ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBindIndexBuffer( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + static_cast( indexType ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::Pipeline pipeline ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBindPipeline( static_cast( m_commandBuffer ), + static_cast( pipelineBindPoint ), + static_cast( pipeline ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::Pipeline pipeline, + uint32_t groupIndex ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBindPipelineShaderGroupNV( static_cast( m_commandBuffer ), + static_cast( pipelineBindPoint ), + static_cast( pipeline ), + groupIndex ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBindShadingRateImageNV( static_cast( m_commandBuffer ), + static_cast( imageView ), + static_cast( imageLayout ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( + uint32_t firstBinding, + ArrayProxy const & buffers, + ArrayProxy const & offsets, + ArrayProxy const & sizes ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); + VULKAN_HPP_ASSERT( sizes.empty() || 
buffers.size() == sizes.size() ); +# else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != offsets.size()" ); + } + if ( !sizes.empty() && buffers.size() != sizes.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != sizes.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdBindTransformFeedbackBuffersEXT( static_cast( m_commandBuffer ), + firstBinding, + buffers.size(), + reinterpret_cast( buffers.data() ), + reinterpret_cast( offsets.data() ), + reinterpret_cast( sizes.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( + uint32_t firstBinding, + ArrayProxy const & buffers, + ArrayProxy const & offsets ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); +# else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::bindVertexBuffers: buffers.size() != offsets.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdBindVertexBuffers( static_cast( m_commandBuffer ), + firstBinding, + buffers.size(), + reinterpret_cast( buffers.data() ), + reinterpret_cast( offsets.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( + uint32_t firstBinding, + ArrayProxy const & buffers, + ArrayProxy const & offsets, + ArrayProxy const & sizes, + ArrayProxy const & strides ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); + VULKAN_HPP_ASSERT( sizes.empty() || buffers.size() == sizes.size() ); + VULKAN_HPP_ASSERT( strides.empty() || buffers.size() == strides.size() ); +# else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( 
VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != offsets.size()" ); + } + if ( !sizes.empty() && buffers.size() != sizes.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != sizes.size()" ); + } + if ( !strides.empty() && buffers.size() != strides.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != strides.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdBindVertexBuffers2EXT( static_cast( m_commandBuffer ), + firstBinding, + buffers.size(), + reinterpret_cast( buffers.data() ), + reinterpret_cast( offsets.data() ), + reinterpret_cast( sizes.data() ), + reinterpret_cast( strides.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions, + VULKAN_HPP_NAMESPACE::Filter filter ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBlitImage( static_cast( m_commandBuffer ), + static_cast( srcImage ), + static_cast( srcImageLayout ), + static_cast( dstImage ), + static_cast( dstImageLayout ), + regions.size(), + reinterpret_cast( regions.data() ), + static_cast( filter ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBlitImage2KHR( static_cast( m_commandBuffer ), + reinterpret_cast( &blitImageInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( + const AccelerationStructureInfoNV & info, + VULKAN_HPP_NAMESPACE::Buffer instanceData, + VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, + VULKAN_HPP_NAMESPACE::Bool32 update, + VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, + 
VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, + VULKAN_HPP_NAMESPACE::Buffer scratch, + VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdBuildAccelerationStructureNV( + static_cast( m_commandBuffer ), + reinterpret_cast( &info ), + static_cast( instanceData ), + static_cast( instanceOffset ), + static_cast( update ), + static_cast( dst ), + static_cast( src ), + static_cast( scratch ), + static_cast( scratchOffset ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresIndirectKHR( + ArrayProxy const & infos, + ArrayProxy const & indirectDeviceAddresses, + ArrayProxy const & indirectStrides, + ArrayProxy const & pMaxPrimitiveCounts ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( infos.size() == indirectDeviceAddresses.size() ); + VULKAN_HPP_ASSERT( infos.size() == indirectStrides.size() ); + VULKAN_HPP_ASSERT( infos.size() == pMaxPrimitiveCounts.size() ); +# else + if ( infos.size() != indirectDeviceAddresses.size() ) + { + throw LogicError( + VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != indirectDeviceAddresses.size()" ); + } + if ( infos.size() != indirectStrides.size() ) + { + throw LogicError( + VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != indirectStrides.size()" ); + } + if ( infos.size() != pMaxPrimitiveCounts.size() ) + { + throw LogicError( + VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != pMaxPrimitiveCounts.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdBuildAccelerationStructuresIndirectKHR( + static_cast( m_commandBuffer ), + infos.size(), + reinterpret_cast( infos.data() ), + reinterpret_cast( indirectDeviceAddresses.data() ), + indirectStrides.data(), + pMaxPrimitiveCounts.data() ); + } + + VULKAN_HPP_INLINE void 
CommandBuffer::buildAccelerationStructuresKHR( + ArrayProxy const & infos, + ArrayProxy const & pBuildRangeInfos ) + const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( infos.size() == pBuildRangeInfos.size() ); +# else + if ( infos.size() != pBuildRangeInfos.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::buildAccelerationStructuresKHR: infos.size() != pBuildRangeInfos.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdBuildAccelerationStructuresKHR( + static_cast( m_commandBuffer ), + infos.size(), + reinterpret_cast( infos.data() ), + reinterpret_cast( pBuildRangeInfos.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( + ArrayProxy const & attachments, + ArrayProxy const & rects ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdClearAttachments( static_cast( m_commandBuffer ), + attachments.size(), + reinterpret_cast( attachments.data() ), + rects.size(), + reinterpret_cast( rects.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( + VULKAN_HPP_NAMESPACE::Image image, + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, + const ClearColorValue & color, + ArrayProxy const & ranges ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdClearColorImage( static_cast( m_commandBuffer ), + static_cast( image ), + static_cast( imageLayout ), + reinterpret_cast( &color ), + ranges.size(), + reinterpret_cast( ranges.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( + VULKAN_HPP_NAMESPACE::Image image, + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, + const ClearDepthStencilValue & depthStencil, + ArrayProxy const & ranges ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdClearDepthStencilImage( + static_cast( m_commandBuffer ), + static_cast( image ), + static_cast( imageLayout ), + reinterpret_cast( &depthStencil ), + ranges.size(), + reinterpret_cast( ranges.data() ) ); 
+ } + + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( + const CopyAccelerationStructureInfoKHR & info ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyAccelerationStructureKHR( + static_cast( m_commandBuffer ), + reinterpret_cast( &info ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNV( + VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, + VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, + VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyAccelerationStructureNV( static_cast( m_commandBuffer ), + static_cast( dst ), + static_cast( src ), + static_cast( mode ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( + const CopyAccelerationStructureToMemoryInfoKHR & info ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyAccelerationStructureToMemoryKHR( + static_cast( m_commandBuffer ), + reinterpret_cast( &info ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( + VULKAN_HPP_NAMESPACE::Buffer srcBuffer, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyBuffer( static_cast( m_commandBuffer ), + static_cast( srcBuffer ), + static_cast( dstBuffer ), + regions.size(), + reinterpret_cast( regions.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyBuffer2KHR( static_cast( m_commandBuffer ), + reinterpret_cast( ©BufferInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( + VULKAN_HPP_NAMESPACE::Buffer srcBuffer, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyBufferToImage( static_cast( m_commandBuffer ), + static_cast( srcBuffer ), + 
static_cast( dstImage ), + static_cast( dstImageLayout ), + regions.size(), + reinterpret_cast( regions.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( + const CopyBufferToImageInfo2KHR & copyBufferToImageInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyBufferToImage2KHR( + static_cast( m_commandBuffer ), + reinterpret_cast( ©BufferToImageInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyImage( + VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyImage( static_cast( m_commandBuffer ), + static_cast( srcImage ), + static_cast( srcImageLayout ), + static_cast( dstImage ), + static_cast( dstImageLayout ), + regions.size(), + reinterpret_cast( regions.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyImage2KHR( static_cast( m_commandBuffer ), + reinterpret_cast( ©ImageInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( + VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyImageToBuffer( static_cast( m_commandBuffer ), + static_cast( srcImage ), + static_cast( srcImageLayout ), + static_cast( dstBuffer ), + regions.size(), + reinterpret_cast( regions.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( + const CopyImageToBufferInfo2KHR & copyImageToBufferInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyImageToBuffer2KHR( + static_cast( m_commandBuffer ), + reinterpret_cast( ©ImageToBufferInfo ) ); + } + + VULKAN_HPP_INLINE void 
CommandBuffer::copyMemoryToAccelerationStructureKHR( + const CopyMemoryToAccelerationStructureInfoKHR & info ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyMemoryToAccelerationStructureKHR( + static_cast( m_commandBuffer ), + reinterpret_cast( &info ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + VULKAN_HPP_NAMESPACE::DeviceSize stride, + VULKAN_HPP_NAMESPACE::QueryResultFlags flags ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdCopyQueryPoolResults( static_cast( m_commandBuffer ), + static_cast( queryPool ), + firstQuery, + queryCount, + static_cast( dstBuffer ), + static_cast( dstOffset ), + static_cast( stride ), + static_cast( flags ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDebugMarkerBeginEXT( static_cast( m_commandBuffer ), + reinterpret_cast( &markerInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDebugMarkerEndEXT( static_cast( m_commandBuffer ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDebugMarkerInsertEXT( static_cast( m_commandBuffer ), + reinterpret_cast( &markerInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDispatch( + static_cast( m_commandBuffer ), groupCountX, groupCountY, groupCountZ ); + } + + VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t 
groupCountY, + uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDispatchBase( static_cast( m_commandBuffer ), + baseGroupX, + baseGroupY, + baseGroupZ, + groupCountX, + groupCountY, + groupCountZ ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDispatchIndirect( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDraw( + static_cast( m_commandBuffer ), vertexCount, instanceCount, firstVertex, firstInstance ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawIndexed( static_cast( m_commandBuffer ), + indexCount, + instanceCount, + firstIndex, + vertexOffset, + firstInstance ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + uint32_t drawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawIndexedIndirect( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + drawCount, + stride ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::Buffer countBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawIndexedIndirectCount( static_cast( m_commandBuffer ), + static_cast( buffer ), + 
static_cast( offset ), + static_cast( countBuffer ), + static_cast( countBufferOffset ), + maxDrawCount, + stride ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + uint32_t drawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawIndirect( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + drawCount, + stride ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::drawIndirectByteCountEXT( uint32_t instanceCount, + uint32_t firstInstance, + VULKAN_HPP_NAMESPACE::Buffer counterBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawIndirectByteCountEXT( static_cast( m_commandBuffer ), + instanceCount, + firstInstance, + static_cast( counterBuffer ), + static_cast( counterBufferOffset ), + counterOffset, + vertexStride ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::Buffer countBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawIndirectCount( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + static_cast( countBuffer ), + static_cast( countBufferOffset ), + maxDrawCount, + stride ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::Buffer countBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawMeshTasksIndirectCountNV( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( 
offset ), + static_cast( countBuffer ), + static_cast( countBufferOffset ), + maxDrawCount, + stride ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + uint32_t drawCount, + uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawMeshTasksIndirectNV( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + drawCount, + stride ); + } + + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, + uint32_t firstTask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdDrawMeshTasksNV( static_cast( m_commandBuffer ), taskCount, firstTask ); + } + + VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdEndConditionalRenderingEXT( static_cast( m_commandBuffer ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdEndDebugUtilsLabelEXT( static_cast( m_commandBuffer ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdEndQuery( + static_cast( m_commandBuffer ), static_cast( queryPool ), query ); + } + + VULKAN_HPP_INLINE void CommandBuffer::endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query, + uint32_t index ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdEndQueryIndexedEXT( + static_cast( m_commandBuffer ), static_cast( queryPool ), query, index ); + } + + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdEndRenderPass( static_cast( m_commandBuffer ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::endRenderPass2( const SubpassEndInfo & subpassEndInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdEndRenderPass2( static_cast( 
m_commandBuffer ), + reinterpret_cast( &subpassEndInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::endTransformFeedbackEXT( + uint32_t firstCounterBuffer, + ArrayProxy const & counterBuffers, + ArrayProxy const & counterBufferOffsets ) const + VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( counterBufferOffsets.empty() || counterBuffers.size() == counterBufferOffsets.size() ); +# else + if ( !counterBufferOffsets.empty() && counterBuffers.size() != counterBufferOffsets.size() ) + { + throw LogicError( + VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::endTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdEndTransformFeedbackEXT( + static_cast( m_commandBuffer ), + firstCounterBuffer, + counterBuffers.size(), + reinterpret_cast( counterBuffers.data() ), + reinterpret_cast( counterBufferOffsets.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( + ArrayProxy const & commandBuffers ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdExecuteCommands( static_cast( m_commandBuffer ), + commandBuffers.size(), + reinterpret_cast( commandBuffers.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( + VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, + const GeneratedCommandsInfoNV & generatedCommandsInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdExecuteGeneratedCommandsNV( + static_cast( m_commandBuffer ), + static_cast( isPreprocessed ), + reinterpret_cast( &generatedCommandsInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + uint32_t data ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdFillBuffer( static_cast( m_commandBuffer ), + static_cast( dstBuffer ), + static_cast( dstOffset ), + static_cast( size ), 
+ data ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdInsertDebugUtilsLabelEXT( static_cast( m_commandBuffer ), + reinterpret_cast( &labelInfo ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdNextSubpass( static_cast( m_commandBuffer ), + static_cast( contents ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, + const SubpassEndInfo & subpassEndInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdNextSubpass2( static_cast( m_commandBuffer ), + reinterpret_cast( &subpassBeginInfo ), + reinterpret_cast( &subpassEndInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( + VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, + VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, + VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, + ArrayProxy const & memoryBarriers, + ArrayProxy const & bufferMemoryBarriers, + ArrayProxy const & imageMemoryBarriers ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdPipelineBarrier( + static_cast( m_commandBuffer ), + static_cast( srcStageMask ), + static_cast( dstStageMask ), + static_cast( dependencyFlags ), + memoryBarriers.size(), + reinterpret_cast( memoryBarriers.data() ), + bufferMemoryBarriers.size(), + reinterpret_cast( bufferMemoryBarriers.data() ), + imageMemoryBarriers.size(), + reinterpret_cast( imageMemoryBarriers.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::pipelineBarrier2KHR( const DependencyInfoKHR & dependencyInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdPipelineBarrier2KHR( static_cast( m_commandBuffer ), + reinterpret_cast( &dependencyInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( + const GeneratedCommandsInfoNV 
& generatedCommandsInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdPreprocessGeneratedCommandsNV( + static_cast( m_commandBuffer ), + reinterpret_cast( &generatedCommandsInfo ) ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, + VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, + uint32_t offset, + ArrayProxy const & values ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdPushConstants( static_cast( m_commandBuffer ), + static_cast( layout ), + static_cast( stageFlags ), + offset, + values.size() * sizeof( T ), + reinterpret_cast( values.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + ArrayProxy const & descriptorWrites ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdPushDescriptorSetKHR( + static_cast( m_commandBuffer ), + static_cast( pipelineBindPoint ), + static_cast( layout ), + set, + descriptorWrites.size(), + reinterpret_cast( descriptorWrites.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + const void * pData ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdPushDescriptorSetWithTemplateKHR( + static_cast( m_commandBuffer ), + static_cast( descriptorUpdateTemplate ), + static_cast( layout ), + set, + pData ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::resetEvent( VULKAN_HPP_NAMESPACE::Event event, + VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdResetEvent( static_cast( m_commandBuffer ), + static_cast( event ), + static_cast( stageMask ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::resetEvent2KHR( VULKAN_HPP_NAMESPACE::Event event, + 
VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR stageMask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdResetEvent2KHR( static_cast( m_commandBuffer ), + static_cast( event ), + static_cast( stageMask ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdResetQueryPool( static_cast( m_commandBuffer ), + static_cast( queryPool ), + firstQuery, + queryCount ); + } + + VULKAN_HPP_INLINE void CommandBuffer::resolveImage( + VULKAN_HPP_NAMESPACE::Image srcImage, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, + VULKAN_HPP_NAMESPACE::Image dstImage, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, + ArrayProxy const & regions ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdResolveImage( static_cast( m_commandBuffer ), + static_cast( srcImage ), + static_cast( srcImageLayout ), + static_cast( dstImage ), + static_cast( dstImageLayout ), + regions.size(), + reinterpret_cast( regions.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdResolveImage2KHR( static_cast( m_commandBuffer ), + reinterpret_cast( &resolveImageInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4] ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetBlendConstants( static_cast( m_commandBuffer ), blendConstants ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void * pCheckpointMarker ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetCheckpointNV( static_cast( m_commandBuffer ), pCheckpointMarker ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( + VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, + ArrayProxy const & customSampleOrders ) const + VULKAN_HPP_NOEXCEPT + { + 
getDispatcher()->vkCmdSetCoarseSampleOrderNV( + static_cast( m_commandBuffer ), + static_cast( sampleOrderType ), + customSampleOrders.size(), + reinterpret_cast( customSampleOrders.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetCullModeEXT( static_cast( m_commandBuffer ), + static_cast( cullMode ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDepthBias( static_cast( m_commandBuffer ), + depthBiasConstantFactor, + depthBiasClamp, + depthBiasSlopeFactor ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, + float maxDepthBounds ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDepthBounds( + static_cast( m_commandBuffer ), minDepthBounds, maxDepthBounds ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setDepthBoundsTestEnableEXT( + VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDepthBoundsTestEnableEXT( static_cast( m_commandBuffer ), + static_cast( depthBoundsTestEnable ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDepthCompareOpEXT( static_cast( m_commandBuffer ), + static_cast( depthCompareOp ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDepthTestEnableEXT( static_cast( m_commandBuffer ), + static_cast( depthTestEnable ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable ) const VULKAN_HPP_NOEXCEPT + { + 
getDispatcher()->vkCmdSetDepthWriteEnableEXT( static_cast( m_commandBuffer ), + static_cast( depthWriteEnable ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDeviceMask( static_cast( m_commandBuffer ), deviceMask ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( + uint32_t firstDiscardRectangle, + ArrayProxy const & discardRectangles ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetDiscardRectangleEXT( static_cast( m_commandBuffer ), + firstDiscardRectangle, + discardRectangles.size(), + reinterpret_cast( discardRectangles.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setEvent( VULKAN_HPP_NAMESPACE::Event event, + VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetEvent( static_cast( m_commandBuffer ), + static_cast( event ), + static_cast( stageMask ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setEvent2KHR( VULKAN_HPP_NAMESPACE::Event event, + const DependencyInfoKHR & dependencyInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetEvent2KHR( static_cast( m_commandBuffer ), + static_cast( event ), + reinterpret_cast( &dependencyInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( + uint32_t firstExclusiveScissor, + ArrayProxy const & exclusiveScissors ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetExclusiveScissorNV( static_cast( m_commandBuffer ), + firstExclusiveScissor, + exclusiveScissors.size(), + reinterpret_cast( exclusiveScissors.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateEnumNV( + VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate, + const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetFragmentShadingRateEnumNV( + static_cast( m_commandBuffer ), + static_cast( shadingRate ), + 
reinterpret_cast( combinerOps ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateKHR( + const Extent2D & fragmentSize, + const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetFragmentShadingRateKHR( + static_cast( m_commandBuffer ), + reinterpret_cast( &fragmentSize ), + reinterpret_cast( combinerOps ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetFrontFaceEXT( static_cast( m_commandBuffer ), + static_cast( frontFace ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setLineStippleEXT( uint32_t lineStippleFactor, + uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetLineStippleEXT( + static_cast( m_commandBuffer ), lineStippleFactor, lineStipplePattern ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetLineWidth( static_cast( m_commandBuffer ), lineWidth ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCmdSetPerformanceMarkerINTEL( + static_cast( m_commandBuffer ), + reinterpret_cast( &markerInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" ); + } + } + + VULKAN_HPP_INLINE void + CommandBuffer::setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCmdSetPerformanceOverrideINTEL( + static_cast( m_commandBuffer ), + reinterpret_cast( &overrideInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + 
throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" ); + } + } + + VULKAN_HPP_INLINE void + CommandBuffer::setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCmdSetPerformanceStreamMarkerINTEL( + static_cast( m_commandBuffer ), + reinterpret_cast( &markerInfo ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" ); + } + } + + VULKAN_HPP_INLINE void CommandBuffer::setPrimitiveTopologyEXT( + VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetPrimitiveTopologyEXT( static_cast( m_commandBuffer ), + static_cast( primitiveTopology ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setRayTracingPipelineStackSizeKHR( uint32_t pipelineStackSize ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetRayTracingPipelineStackSizeKHR( static_cast( m_commandBuffer ), + pipelineStackSize ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( + const SampleLocationsInfoEXT & sampleLocationsInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetSampleLocationsEXT( + static_cast( m_commandBuffer ), + reinterpret_cast( &sampleLocationsInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setScissor( + uint32_t firstScissor, ArrayProxy const & scissors ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetScissor( static_cast( m_commandBuffer ), + firstScissor, + scissors.size(), + reinterpret_cast( scissors.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( + ArrayProxy const & scissors ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetScissorWithCountEXT( static_cast( m_commandBuffer ), + scissors.size(), + reinterpret_cast( scissors.data() ) ); + 
} + + VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + uint32_t compareMask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetStencilCompareMask( + static_cast( m_commandBuffer ), static_cast( faceMask ), compareMask ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + VULKAN_HPP_NAMESPACE::StencilOp failOp, + VULKAN_HPP_NAMESPACE::StencilOp passOp, + VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, + VULKAN_HPP_NAMESPACE::CompareOp compareOp ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetStencilOpEXT( static_cast( m_commandBuffer ), + static_cast( faceMask ), + static_cast( failOp ), + static_cast( passOp ), + static_cast( depthFailOp ), + static_cast( compareOp ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + uint32_t reference ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetStencilReference( + static_cast( m_commandBuffer ), static_cast( faceMask ), reference ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetStencilTestEnableEXT( static_cast( m_commandBuffer ), + static_cast( stencilTestEnable ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, + uint32_t writeMask ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetStencilWriteMask( + static_cast( m_commandBuffer ), static_cast( faceMask ), writeMask ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setViewport( + uint32_t firstViewport, + ArrayProxy const & viewports ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetViewport( static_cast( m_commandBuffer ), + firstViewport, + viewports.size(), + reinterpret_cast( viewports.data() ) ); + } + + VULKAN_HPP_INLINE void 
CommandBuffer::setViewportShadingRatePaletteNV( + uint32_t firstViewport, ArrayProxy const & shadingRatePalettes ) + const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetViewportShadingRatePaletteNV( + static_cast( m_commandBuffer ), + firstViewport, + shadingRatePalettes.size(), + reinterpret_cast( shadingRatePalettes.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( + uint32_t firstViewport, + ArrayProxy const & viewportWScalings ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetViewportWScalingNV( + static_cast( m_commandBuffer ), + firstViewport, + viewportWScalings.size(), + reinterpret_cast( viewportWScalings.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( + ArrayProxy const & viewports ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdSetViewportWithCountEXT( static_cast( m_commandBuffer ), + viewports.size(), + reinterpret_cast( viewports.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( + const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, + const StridedDeviceAddressRegionKHR & missShaderBindingTable, + const StridedDeviceAddressRegionKHR & hitShaderBindingTable, + const StridedDeviceAddressRegionKHR & callableShaderBindingTable, + VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdTraceRaysIndirectKHR( + static_cast( m_commandBuffer ), + reinterpret_cast( &raygenShaderBindingTable ), + reinterpret_cast( &missShaderBindingTable ), + reinterpret_cast( &hitShaderBindingTable ), + reinterpret_cast( &callableShaderBindingTable ), + static_cast( indirectDeviceAddress ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::traceRaysKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, + const StridedDeviceAddressRegionKHR & missShaderBindingTable, + const StridedDeviceAddressRegionKHR & hitShaderBindingTable, + const StridedDeviceAddressRegionKHR & 
callableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdTraceRaysKHR( + static_cast( m_commandBuffer ), + reinterpret_cast( &raygenShaderBindingTable ), + reinterpret_cast( &missShaderBindingTable ), + reinterpret_cast( &hitShaderBindingTable ), + reinterpret_cast( &callableShaderBindingTable ), + width, + height, + depth ); + } + + VULKAN_HPP_INLINE void CommandBuffer::traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, + VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, + VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, + VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, + VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, + VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, + VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdTraceRaysNV( static_cast( m_commandBuffer ), + static_cast( raygenShaderBindingTableBuffer ), + static_cast( raygenShaderBindingOffset ), + static_cast( missShaderBindingTableBuffer ), + static_cast( missShaderBindingOffset ), + static_cast( missShaderBindingStride ), + static_cast( hitShaderBindingTableBuffer ), + static_cast( hitShaderBindingOffset ), + static_cast( hitShaderBindingStride ), + static_cast( callableShaderBindingTableBuffer ), + static_cast( callableShaderBindingOffset ), + static_cast( callableShaderBindingStride ), + width, + height, + depth ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + ArrayProxy const 
& data ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdUpdateBuffer( static_cast( m_commandBuffer ), + static_cast( dstBuffer ), + static_cast( dstOffset ), + data.size() * sizeof( T ), + reinterpret_cast( data.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( + ArrayProxy const & events, + VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, + VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, + ArrayProxy const & memoryBarriers, + ArrayProxy const & bufferMemoryBarriers, + ArrayProxy const & imageMemoryBarriers ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdWaitEvents( static_cast( m_commandBuffer ), + events.size(), + reinterpret_cast( events.data() ), + static_cast( srcStageMask ), + static_cast( dstStageMask ), + memoryBarriers.size(), + reinterpret_cast( memoryBarriers.data() ), + bufferMemoryBarriers.size(), + reinterpret_cast( bufferMemoryBarriers.data() ), + imageMemoryBarriers.size(), + reinterpret_cast( imageMemoryBarriers.data() ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::waitEvents2KHR( ArrayProxy const & events, + ArrayProxy const & dependencyInfos ) + const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( events.size() == dependencyInfos.size() ); +# else + if ( events.size() != dependencyInfos.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::CommandBuffer::waitEvents2KHR: events.size() != dependencyInfos.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + getDispatcher()->vkCmdWaitEvents2KHR( static_cast( m_commandBuffer ), + events.size(), + reinterpret_cast( events.data() ), + reinterpret_cast( dependencyInfos.data() ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT + { + 
getDispatcher()->vkCmdWriteAccelerationStructuresPropertiesKHR( + static_cast( m_commandBuffer ), + accelerationStructures.size(), + reinterpret_cast( accelerationStructures.data() ), + static_cast( queryType ), + static_cast( queryPool ), + firstQuery ); + } + + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( + ArrayProxy const & accelerationStructures, + VULKAN_HPP_NAMESPACE::QueryType queryType, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdWriteAccelerationStructuresPropertiesNV( + static_cast( m_commandBuffer ), + accelerationStructures.size(), + reinterpret_cast( accelerationStructures.data() ), + static_cast( queryType ), + static_cast( queryPool ), + firstQuery ); + } + + VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarker2AMD( VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR stage, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + uint32_t marker ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdWriteBufferMarker2AMD( static_cast( m_commandBuffer ), + static_cast( stage ), + static_cast( dstBuffer ), + static_cast( dstOffset ), + marker ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, + VULKAN_HPP_NAMESPACE::Buffer dstBuffer, + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, + uint32_t marker ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdWriteBufferMarkerAMD( static_cast( m_commandBuffer ), + static_cast( pipelineStage ), + static_cast( dstBuffer ), + static_cast( dstOffset ), + marker ); + } + + VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdWriteTimestamp( static_cast( m_commandBuffer ), + static_cast( pipelineStage ), + static_cast( 
queryPool ), + query ); + } + + VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp2KHR( VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR stage, + VULKAN_HPP_NAMESPACE::QueryPool queryPool, + uint32_t query ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkCmdWriteTimestamp2KHR( static_cast( m_commandBuffer ), + static_cast( stage ), + static_cast( queryPool ), + query ); + } + + VULKAN_HPP_INLINE void CommandBuffer::end() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkEndCommandBuffer( static_cast( m_commandBuffer ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" ); + } + } + + VULKAN_HPP_INLINE void CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkResetCommandBuffer( + static_cast( m_commandBuffer ), static_cast( flags ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result DeferredOperationKHR::join() const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkDeferredOperationJoinKHR( + static_cast( m_device ), static_cast( m_deferredOperationKHR ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DeferredOperationKHR::join" ); + } + return result; + } + + VULKAN_HPP_NODISCARD uint32_t DeferredOperationKHR::getMaxConcurrency() const VULKAN_HPP_NOEXCEPT + { + return getDispatcher()->vkGetDeferredOperationMaxConcurrencyKHR( + static_cast( m_device ), static_cast( m_deferredOperationKHR ) ); + } + + 
VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + DeferredOperationKHR::getResult() const VULKAN_HPP_NOEXCEPT + { + return static_cast( getDispatcher()->vkGetDeferredOperationResultKHR( + static_cast( m_device ), static_cast( m_deferredOperationKHR ) ) ); + } + + VULKAN_HPP_INLINE void + DescriptorPool::reset( VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkResetDescriptorPool( static_cast( m_device ), + static_cast( m_descriptorPool ), + static_cast( flags ) ); + } + + VULKAN_HPP_INLINE void + DescriptorSet::updateWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + const void * pData ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkUpdateDescriptorSetWithTemplate( + static_cast( m_device ), + static_cast( m_descriptorSet ), + static_cast( descriptorUpdateTemplate ), + pData ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceSize DeviceMemory::getCommitment() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::DeviceSize committedMemoryInBytes; + getDispatcher()->vkGetDeviceMemoryCommitment( static_cast( m_device ), + static_cast( m_deviceMemory ), + reinterpret_cast( &committedMemoryInBytes ) ); + return committedMemoryInBytes; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_NODISCARD HANDLE + DeviceMemory::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType ) const + { + HANDLE handle; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetMemoryWin32HandleNV( static_cast( m_device ), + static_cast( m_deviceMemory ), + static_cast( handleType ), + &handle ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DeviceMemory::getMemoryWin32HandleNV" ); + } + return handle; + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD void * DeviceMemory::mapMemory( 
VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + VULKAN_HPP_NAMESPACE::MemoryMapFlags flags ) const + { + void * pData; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkMapMemory( static_cast( m_device ), + static_cast( m_deviceMemory ), + static_cast( offset ), + static_cast( size ), + static_cast( flags ), + &pData ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DeviceMemory::mapMemory" ); + } + return pData; + } + + VULKAN_HPP_INLINE void DeviceMemory::unmapMemory() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkUnmapMemory( static_cast( m_device ), + static_cast( m_deviceMemory ) ); + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE void DisplayKHR::acquireWinrtNV() const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkAcquireWinrtDisplayNV( + static_cast( m_physicalDevice ), static_cast( m_displayKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::acquireWinrtNV" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD std::vector + DisplayKHR::getModeProperties2() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetDisplayModeProperties2KHR( static_cast( m_physicalDevice ), + static_cast( m_displayKHR ), + &propertyCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkGetDisplayModeProperties2KHR( + static_cast( m_physicalDevice ), + static_cast( m_displayKHR ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == 
VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::getModeProperties2" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector + DisplayKHR::getModeProperties() const + { + std::vector properties; + uint32_t propertyCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetDisplayModePropertiesKHR( static_cast( m_physicalDevice ), + static_cast( m_displayKHR ), + &propertyCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( getDispatcher()->vkGetDisplayModePropertiesKHR( + static_cast( m_physicalDevice ), + static_cast( m_displayKHR ), + &propertyCount, + reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::getModeProperties" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR + DisplayModeKHR::getDisplayPlaneCapabilities( uint32_t planeIndex ) const + { + VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetDisplayPlaneCapabilitiesKHR( + static_cast( m_physicalDevice ), + static_cast( m_displayModeKHR ), + planeIndex, + reinterpret_cast( &capabilities ) ) ); + if ( result != 
VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayModeKHR::getDisplayPlaneCapabilities" ); + } + return capabilities; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Event::getStatus() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetEventStatus( static_cast( m_device ), static_cast( m_event ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eEventSet ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eEventReset ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Event::getStatus" ); + } + return result; + } + + VULKAN_HPP_INLINE void Event::reset() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkResetEvent( static_cast( m_device ), static_cast( m_event ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Event::reset" ); + } + } + + VULKAN_HPP_INLINE void Event::set() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkSetEvent( static_cast( m_device ), static_cast( m_event ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Event::set" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Fence::getStatus() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetFenceStatus( static_cast( m_device ), static_cast( m_fence ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eNotReady ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Fence::getStatus" ); + } + return result; + } + + VULKAN_HPP_INLINE void Image::bindMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset ) const + { + 
VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkBindImageMemory( static_cast( m_device ), + static_cast( m_image ), + static_cast( memory ), + static_cast( memoryOffset ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Image::bindMemory" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT + Image::getDrmFormatModifierPropertiesEXT() const + { + VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT properties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetImageDrmFormatModifierPropertiesEXT( + static_cast( m_device ), + static_cast( m_image ), + reinterpret_cast( &properties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Image::getDrmFormatModifierPropertiesEXT" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements + Image::getMemoryRequirements() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements; + getDispatcher()->vkGetImageMemoryRequirements( static_cast( m_device ), + static_cast( m_image ), + reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + VULKAN_HPP_NODISCARD std::vector + Image::getSparseMemoryRequirements() const VULKAN_HPP_NOEXCEPT + { + uint32_t sparseMemoryRequirementCount; + getDispatcher()->vkGetImageSparseMemoryRequirements( + static_cast( m_device ), static_cast( m_image ), &sparseMemoryRequirementCount, nullptr ); + std::vector sparseMemoryRequirements( + sparseMemoryRequirementCount ); + getDispatcher()->vkGetImageSparseMemoryRequirements( + static_cast( m_device ), + static_cast( m_image ), + &sparseMemoryRequirementCount, + reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + 
return sparseMemoryRequirements; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout + Image::getSubresourceLayout( const ImageSubresource & subresource ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::SubresourceLayout layout; + getDispatcher()->vkGetImageSubresourceLayout( static_cast( m_device ), + static_cast( m_image ), + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); + return layout; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX ImageView::getAddressNVX() const + { + VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX properties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetImageViewAddressNVX( + static_cast( m_device ), + static_cast( m_imageView ), + reinterpret_cast( &properties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::ImageView::getAddressNVX" ); + } + return properties; + } + + VULKAN_HPP_NODISCARD std::vector PipelineCache::getData() const + { + std::vector data; + size_t dataSize; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( getDispatcher()->vkGetPipelineCacheData( + static_cast( m_device ), static_cast( m_pipelineCache ), &dataSize, nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( + getDispatcher()->vkGetPipelineCacheData( static_cast( m_device ), + static_cast( m_pipelineCache ), + &dataSize, + reinterpret_cast( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( dataSize < data.size() ) ) + { + data.resize( dataSize ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PipelineCache::getData" ); + } + return 
data; + } + + VULKAN_HPP_INLINE void + PipelineCache::merge( ArrayProxy const & srcCaches ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkMergePipelineCaches( static_cast( m_device ), + static_cast( m_pipelineCache ), + srcCaches.size(), + reinterpret_cast( srcCaches.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::PipelineCache::merge" ); + } + } + + VULKAN_HPP_INLINE void Pipeline::compileDeferredNV( uint32_t shader ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkCompileDeferredNV( + static_cast( m_device ), static_cast( m_pipeline ), shader ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::compileDeferredNV" ); + } + } + + template + VULKAN_HPP_NODISCARD std::vector Pipeline::getRayTracingCaptureReplayShaderGroupHandlesKHR( + uint32_t firstGroup, uint32_t groupCount, size_t dataSize ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( + getDispatcher()->vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( static_cast( m_device ), + static_cast( m_pipeline ), + firstGroup, + groupCount, + data.size() * sizeof( T ), + reinterpret_cast( data.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( + result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); + } + return data; + } + + template + VULKAN_HPP_NODISCARD T Pipeline::getRayTracingCaptureReplayShaderGroupHandleKHR( uint32_t firstGroup, + uint32_t groupCount ) const + { + T data; + Result result = static_cast( + getDispatcher()->vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( static_cast( m_device ), + static_cast( m_pipeline ), + firstGroup, + groupCount, + sizeof( T ), + 
reinterpret_cast( &data ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( + result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingCaptureReplayShaderGroupHandleKHR" ); + } + return data; + } + + template + VULKAN_HPP_NODISCARD std::vector + Pipeline::getRayTracingShaderGroupHandlesKHR( uint32_t firstGroup, uint32_t groupCount, size_t dataSize ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( + getDispatcher()->vkGetRayTracingShaderGroupHandlesKHR( static_cast( m_device ), + static_cast( m_pipeline ), + firstGroup, + groupCount, + data.size() * sizeof( T ), + reinterpret_cast( data.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandlesKHR" ); + } + return data; + } + + template + VULKAN_HPP_NODISCARD T Pipeline::getRayTracingShaderGroupHandleKHR( uint32_t firstGroup, uint32_t groupCount ) const + { + T data; + Result result = static_cast( + getDispatcher()->vkGetRayTracingShaderGroupHandlesKHR( static_cast( m_device ), + static_cast( m_pipeline ), + firstGroup, + groupCount, + sizeof( T ), + reinterpret_cast( &data ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandleKHR" ); + } + return data; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceSize + Pipeline::getRayTracingShaderGroupStackSizeKHR( + uint32_t group, VULKAN_HPP_NAMESPACE::ShaderGroupShaderKHR groupShader ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( + getDispatcher()->vkGetRayTracingShaderGroupStackSizeKHR( static_cast( m_device ), + static_cast( m_pipeline ), + group, + static_cast( groupShader ) ) ); + } + + VULKAN_HPP_NODISCARD std::vector + Pipeline::getShaderInfoAMD( 
VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, + VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType ) const + { + std::vector info; + size_t infoSize; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetShaderInfoAMD( static_cast( m_device ), + static_cast( m_pipeline ), + static_cast( shaderStage ), + static_cast( infoType ), + &infoSize, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && infoSize ) + { + info.resize( infoSize ); + result = static_cast( + getDispatcher()->vkGetShaderInfoAMD( static_cast( m_device ), + static_cast( m_pipeline ), + static_cast( shaderStage ), + static_cast( infoType ), + &infoSize, + reinterpret_cast( info.data() ) ) ); + VULKAN_HPP_ASSERT( infoSize <= info.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( infoSize < info.size() ) ) + { + info.resize( infoSize ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getShaderInfoAMD" ); + } + return info; + } + + template + VULKAN_HPP_NODISCARD std::pair> + QueryPool::getResults( uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + VULKAN_HPP_NAMESPACE::DeviceSize stride, + VULKAN_HPP_NAMESPACE::QueryResultFlags flags ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = + static_cast( getDispatcher()->vkGetQueryPoolResults( static_cast( m_device ), + static_cast( m_queryPool ), + firstQuery, + queryCount, + data.size() * sizeof( T ), + reinterpret_cast( data.data() ), + static_cast( stride ), + static_cast( flags ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eNotReady ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::QueryPool::getResults" ); + } + return 
std::make_pair( result, data ); + } + + template + VULKAN_HPP_NODISCARD std::pair + QueryPool::getResult( uint32_t firstQuery, + uint32_t queryCount, + VULKAN_HPP_NAMESPACE::DeviceSize stride, + VULKAN_HPP_NAMESPACE::QueryResultFlags flags ) const + { + T data; + Result result = + static_cast( getDispatcher()->vkGetQueryPoolResults( static_cast( m_device ), + static_cast( m_queryPool ), + firstQuery, + queryCount, + sizeof( T ), + reinterpret_cast( &data ), + static_cast( stride ), + static_cast( flags ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eNotReady ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::QueryPool::getResult" ); + } + return std::make_pair( result, data ); + } + + VULKAN_HPP_INLINE void QueryPool::reset( uint32_t firstQuery, uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkResetQueryPool( + static_cast( m_device ), static_cast( m_queryPool ), firstQuery, queryCount ); + } + + VULKAN_HPP_NODISCARD std::vector + Queue::getCheckpointData2NV() const VULKAN_HPP_NOEXCEPT + { + uint32_t checkpointDataCount; + getDispatcher()->vkGetQueueCheckpointData2NV( static_cast( m_queue ), &checkpointDataCount, nullptr ); + std::vector checkpointData( checkpointDataCount ); + getDispatcher()->vkGetQueueCheckpointData2NV( static_cast( m_queue ), + &checkpointDataCount, + reinterpret_cast( checkpointData.data() ) ); + VULKAN_HPP_ASSERT( checkpointDataCount <= checkpointData.size() ); + return checkpointData; + } + + VULKAN_HPP_NODISCARD std::vector + Queue::getCheckpointDataNV() const VULKAN_HPP_NOEXCEPT + { + uint32_t checkpointDataCount; + getDispatcher()->vkGetQueueCheckpointDataNV( static_cast( m_queue ), &checkpointDataCount, nullptr ); + std::vector checkpointData( checkpointDataCount ); + getDispatcher()->vkGetQueueCheckpointDataNV( static_cast( m_queue ), + &checkpointDataCount, + reinterpret_cast( checkpointData.data() ) ); + VULKAN_HPP_ASSERT( 
checkpointDataCount <= checkpointData.size() ); + return checkpointData; + } + + VULKAN_HPP_INLINE void + Queue::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkQueueBeginDebugUtilsLabelEXT( static_cast( m_queue ), + reinterpret_cast( &labelInfo ) ); + } + + VULKAN_HPP_INLINE void Queue::bindSparse( ArrayProxy const & bindInfo, + VULKAN_HPP_NAMESPACE::Fence fence ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkQueueBindSparse( static_cast( m_queue ), + bindInfo.size(), + reinterpret_cast( bindInfo.data() ), + static_cast( fence ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" ); + } + } + + VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT() const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkQueueEndDebugUtilsLabelEXT( static_cast( m_queue ) ); + } + + VULKAN_HPP_INLINE void + Queue::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkQueueInsertDebugUtilsLabelEXT( static_cast( m_queue ), + reinterpret_cast( &labelInfo ) ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result + Queue::presentKHR( const PresentInfoKHR & presentInfo ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkQueuePresentKHR( + static_cast( m_queue ), reinterpret_cast( &presentInfo ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::presentKHR" ); + } + return result; + } + + VULKAN_HPP_INLINE void + Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( 
getDispatcher()->vkQueueSetPerformanceConfigurationINTEL( + static_cast( m_queue ), static_cast( configuration ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" ); + } + } + + VULKAN_HPP_INLINE void Queue::submit( ArrayProxy const & submits, + VULKAN_HPP_NAMESPACE::Fence fence ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkQueueSubmit( static_cast( m_queue ), + submits.size(), + reinterpret_cast( submits.data() ), + static_cast( fence ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" ); + } + } + + VULKAN_HPP_INLINE void Queue::submit2KHR( ArrayProxy const & submits, + VULKAN_HPP_NAMESPACE::Fence fence ) const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkQueueSubmit2KHR( static_cast( m_queue ), + submits.size(), + reinterpret_cast( submits.data() ), + static_cast( fence ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2KHR" ); + } + } + + VULKAN_HPP_INLINE void Queue::waitIdle() const + { + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkQueueWaitIdle( static_cast( m_queue ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); + } + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D RenderPass::getRenderAreaGranularity() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::Extent2D granularity; + getDispatcher()->vkGetRenderAreaGranularity( static_cast( m_device ), + static_cast( m_renderPass ), + reinterpret_cast( &granularity ) ); + return granularity; + } + + VULKAN_HPP_NODISCARD uint64_t Semaphore::getCounterValue() const + { + uint64_t value; + 
VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetSemaphoreCounterValue( + static_cast( m_device ), static_cast( m_semaphore ), &value ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::Semaphore::getCounterValue" ); + } + return value; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE void SwapchainKHR::acquireFullScreenExclusiveModeEXT() const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkAcquireFullScreenExclusiveModeEXT( + static_cast( m_device ), static_cast( m_swapchainKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::acquireFullScreenExclusiveModeEXT" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_NODISCARD std::pair SwapchainKHR::acquireNextImage( + uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence ) const + { + uint32_t imageIndex; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkAcquireNextImageKHR( static_cast( m_device ), + static_cast( m_swapchainKHR ), + timeout, + static_cast( semaphore ), + static_cast( fence ), + &imageIndex ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eTimeout ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eNotReady ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::acquireNextImage" ); + } + return std::make_pair( result, imageIndex ); + } + + VULKAN_HPP_NODISCARD std::vector + SwapchainKHR::getPastPresentationTimingGOOGLE() const + { + std::vector presentationTimings; + uint32_t presentationTimingCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetPastPresentationTimingGOOGLE( 
static_cast( m_device ), + static_cast( m_swapchainKHR ), + &presentationTimingCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && presentationTimingCount ) + { + presentationTimings.resize( presentationTimingCount ); + result = static_cast( getDispatcher()->vkGetPastPresentationTimingGOOGLE( + static_cast( m_device ), + static_cast( m_swapchainKHR ), + &presentationTimingCount, + reinterpret_cast( presentationTimings.data() ) ) ); + VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( presentationTimingCount < presentationTimings.size() ) ) + { + presentationTimings.resize( presentationTimingCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getPastPresentationTimingGOOGLE" ); + } + return presentationTimings; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE + SwapchainKHR::getRefreshCycleDurationGOOGLE() const + { + VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE displayTimingProperties; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetRefreshCycleDurationGOOGLE( + static_cast( m_device ), + static_cast( m_swapchainKHR ), + reinterpret_cast( &displayTimingProperties ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getRefreshCycleDurationGOOGLE" ); + } + return displayTimingProperties; + } + + VULKAN_HPP_NODISCARD uint64_t + SwapchainKHR::getCounterEXT( VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter ) const + { + uint64_t counterValue; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetSwapchainCounterEXT( static_cast( m_device ), + static_cast( m_swapchainKHR ), + static_cast( counter ), 
+ &counterValue ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getCounterEXT" ); + } + return counterValue; + } + + VULKAN_HPP_NODISCARD std::vector SwapchainKHR::getImages() const + { + std::vector swapchainImages; + uint32_t swapchainImageCount; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetSwapchainImagesKHR( static_cast( m_device ), + static_cast( m_swapchainKHR ), + &swapchainImageCount, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && swapchainImageCount ) + { + swapchainImages.resize( swapchainImageCount ); + result = static_cast( + getDispatcher()->vkGetSwapchainImagesKHR( static_cast( m_device ), + static_cast( m_swapchainKHR ), + &swapchainImageCount, + swapchainImages.data() ) ); + VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( swapchainImageCount < swapchainImages.size() ) ) + { + swapchainImages.resize( swapchainImageCount ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getImages" ); + } + return swapchainImages; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result SwapchainKHR::getStatus() const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkGetSwapchainStatusKHR( + static_cast( m_device ), static_cast( m_swapchainKHR ) ) ); + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && + ( result != VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR ) ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getStatus" ); + } + return result; + } + +# ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE void SwapchainKHR::releaseFullScreenExclusiveModeEXT() 
const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkReleaseFullScreenExclusiveModeEXT( + static_cast( m_device ), static_cast( m_swapchainKHR ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::releaseFullScreenExclusiveModeEXT" ); + } + } +# endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VULKAN_HPP_INLINE void + SwapchainKHR::setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable ) const VULKAN_HPP_NOEXCEPT + { + getDispatcher()->vkSetLocalDimmingAMD( static_cast( m_device ), + static_cast( m_swapchainKHR ), + static_cast( localDimmingEnable ) ); + } + + VULKAN_HPP_NODISCARD std::vector ValidationCacheEXT::getData() const + { + std::vector data; + size_t dataSize; + VULKAN_HPP_NAMESPACE::Result result; + do + { + result = static_cast( + getDispatcher()->vkGetValidationCacheDataEXT( static_cast( m_device ), + static_cast( m_validationCacheEXT ), + &dataSize, + nullptr ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( + getDispatcher()->vkGetValidationCacheDataEXT( static_cast( m_device ), + static_cast( m_validationCacheEXT ), + &dataSize, + reinterpret_cast( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + } + } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( dataSize < data.size() ) ) + { + data.resize( dataSize ); + } + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::ValidationCacheEXT::getData" ); + } + return data; + } + + VULKAN_HPP_INLINE void + ValidationCacheEXT::merge( ArrayProxy const & srcCaches ) const + { + VULKAN_HPP_NAMESPACE::Result result = + static_cast( getDispatcher()->vkMergeValidationCachesEXT( + static_cast( m_device ), + static_cast( m_validationCacheEXT ), 
+ srcCaches.size(), + reinterpret_cast( srcCaches.data() ) ) ); + if ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + throwResultException( result, VULKAN_HPP_NAMESPACE_STRING "::ValidationCacheEXT::merge" ); + } + } + +#endif + } // namespace VULKAN_HPP_RAII_NAMESPACE +} // namespace VULKAN_HPP_NAMESPACE +#endif